Implement experimental Vulkan backend

Stenzek 2016-08-13 22:57:50 +10:00
parent fdd954e7e7
commit 77a128ab87
59 changed files with 14533 additions and 1 deletion


@@ -233,6 +233,7 @@ set(LIBS
sfml-network
sfml-system
videonull
videovulkan
videoogl
videosoftware
z


@@ -207,6 +207,9 @@
<ProjectReference Include="..\VideoBackends\D3D12\D3D12.vcxproj">
<Project>{570215b7-e32f-4438-95ae-c8d955f9fca3}</Project>
</ProjectReference>
<ProjectReference Include="..\VideoBackends\Vulkan\Vulkan.vcxproj">
<Project>{29f29a19-f141-45ad-9679-5a2923b49da3}</Project>
</ProjectReference>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">


@@ -238,6 +238,9 @@
<ProjectReference Include="$(CoreDir)VideoBackends\Null\Null.vcxproj">
<Project>{53A5391B-737E-49A8-BC8F-312ADA00736F}</Project>
</ProjectReference>
<ProjectReference Include="$(CoreDir)VideoBackends\Vulkan\Vulkan.vcxproj">
<Project>{29F29A19-F141-45AD-9679-5A2923B49DA3}</Project>
</ProjectReference>
<ProjectReference Include="$(CoreDir)VideoCommon\VideoCommon.vcxproj">
<Project>{3de9ee35-3e91-4f27-a014-2866ad8c3fe3}</Project>
</ProjectReference>


@@ -1,4 +1,5 @@
add_subdirectory(OGL)
add_subdirectory(Null)
add_subdirectory(Software)
add_subdirectory(Vulkan)
# TODO: Add other backends here!


@@ -0,0 +1,249 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <array>
#include <vector>
#include "Common/Assert.h"
#include "Common/Logging/Log.h"
#include "VideoBackends/Vulkan/BoundingBox.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/ObjectCache.h"
#include "VideoBackends/Vulkan/StagingBuffer.h"
#include "VideoBackends/Vulkan/StateTracker.h"
#include "VideoBackends/Vulkan/Util.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
namespace Vulkan
{
BoundingBox::BoundingBox()
{
}
BoundingBox::~BoundingBox()
{
if (m_gpu_buffer != VK_NULL_HANDLE)
{
vkDestroyBuffer(g_vulkan_context->GetDevice(), m_gpu_buffer, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), m_gpu_memory, nullptr);
}
}
bool BoundingBox::Initialize()
{
if (!g_vulkan_context->SupportsBoundingBox())
{
WARN_LOG(VIDEO, "Vulkan: Bounding box is unsupported by your device.");
// Not a fatal error, the feature will simply be unavailable.
return true;
}
if (!CreateGPUBuffer())
return false;
if (!CreateReadbackBuffer())
return false;
return true;
}
void BoundingBox::Flush(StateTracker* state_tracker)
{
if (m_gpu_buffer == VK_NULL_HANDLE)
return;
// Combine updates together, chances are the game would have written all 4.
bool updated_buffer = false;
for (size_t start = 0; start < 4; start++)
{
if (!m_values_dirty[start])
continue;
size_t count = 0;
std::array<s32, 4> write_values;
for (; (start + count) < 4; count++)
{
if (!m_values_dirty[start + count])
break;
m_readback_buffer->Read((start + count) * sizeof(s32), &write_values[count], sizeof(s32),
false);
m_values_dirty[start + count] = false;
}
// We can't issue vkCmdUpdateBuffer within a render pass.
// However, the writes must be serialized, so we can't put it in the init buffer.
if (!updated_buffer)
{
state_tracker->EndRenderPass();
// Ensure GPU buffer is in a state where it can be transferred to.
Util::BufferMemoryBarrier(
g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, 0,
BUFFER_SIZE, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
updated_buffer = true;
}
vkCmdUpdateBuffer(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
start * sizeof(s32), count * sizeof(s32),
reinterpret_cast<const u32*>(write_values.data()));
}
// Restore fragment shader access to the buffer.
if (updated_buffer)
{
Util::BufferMemoryBarrier(
g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer, VK_ACCESS_TRANSFER_WRITE_BIT,
VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, 0, BUFFER_SIZE,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
}
// We're now up-to-date.
m_valid = true;
}
void BoundingBox::Invalidate(StateTracker* state_tracker)
{
if (m_gpu_buffer == VK_NULL_HANDLE)
return;
m_valid = false;
}
s32 BoundingBox::Get(StateTracker* state_tracker, size_t index)
{
_assert_(index < NUM_VALUES);
if (!m_valid)
Readback(state_tracker);
s32 value;
m_readback_buffer->Read(index * sizeof(s32), &value, sizeof(value), false);
return value;
}
void BoundingBox::Set(StateTracker* state_tracker, size_t index, s32 value)
{
_assert_(index < NUM_VALUES);
// If we're currently valid, update the stored value in both our cache and the GPU buffer.
if (m_valid)
{
// Skip when it hasn't changed.
s32 current_value;
m_readback_buffer->Read(index * sizeof(s32), &current_value, sizeof(current_value), false);
if (current_value == value)
return;
}
// Flag as dirty, and update values.
m_readback_buffer->Write(index * sizeof(s32), &value, sizeof(value), true);
m_values_dirty[index] = true;
}
bool BoundingBox::CreateGPUBuffer()
{
VkBufferUsageFlags buffer_usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
VK_BUFFER_USAGE_TRANSFER_DST_BIT;
VkBufferCreateInfo info = {
VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
0, // VkBufferCreateFlags flags
BUFFER_SIZE, // VkDeviceSize size
buffer_usage, // VkBufferUsageFlags usage
VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode
0, // uint32_t queueFamilyIndexCount
nullptr // const uint32_t* pQueueFamilyIndices
};
VkBuffer buffer;
VkResult res = vkCreateBuffer(g_vulkan_context->GetDevice(), &info, nullptr, &buffer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: ");
return false;
}
VkMemoryRequirements memory_requirements;
vkGetBufferMemoryRequirements(g_vulkan_context->GetDevice(), buffer, &memory_requirements);
uint32_t memory_type_index = g_vulkan_context->GetMemoryType(memory_requirements.memoryTypeBits,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
VkMemoryAllocateInfo memory_allocate_info = {
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
memory_requirements.size, // VkDeviceSize allocationSize
memory_type_index // uint32_t memoryTypeIndex
};
VkDeviceMemory memory;
res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, &memory);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
return false;
}
res = vkBindBufferMemory(g_vulkan_context->GetDevice(), buffer, memory, 0);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkBindBufferMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr);
return false;
}
m_gpu_buffer = buffer;
m_gpu_memory = memory;
return true;
}
bool BoundingBox::CreateReadbackBuffer()
{
m_readback_buffer = StagingBuffer::Create(STAGING_BUFFER_TYPE_READBACK, BUFFER_SIZE,
VK_BUFFER_USAGE_TRANSFER_DST_BIT);
if (!m_readback_buffer || !m_readback_buffer->Map())
return false;
return true;
}
void BoundingBox::Readback(StateTracker* state_tracker)
{
// Can't be done within a render pass.
state_tracker->EndRenderPass();
// Ensure all writes are completed to the GPU buffer prior to the transfer.
Util::BufferMemoryBarrier(
g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, 0,
BUFFER_SIZE, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
m_readback_buffer->PrepareForGPUWrite(g_command_buffer_mgr->GetCurrentCommandBuffer(),
VK_ACCESS_TRANSFER_WRITE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT);
// Copy from GPU -> readback buffer.
VkBufferCopy region = {0, 0, BUFFER_SIZE};
vkCmdCopyBuffer(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
m_readback_buffer->GetBuffer(), 1, &region);
// Restore GPU buffer access.
Util::BufferMemoryBarrier(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_gpu_buffer,
VK_ACCESS_TRANSFER_READ_BIT,
VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, 0, BUFFER_SIZE,
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
m_readback_buffer->FlushGPUCache(g_command_buffer_mgr->GetCurrentCommandBuffer(),
VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
// Wait until these commands complete.
Util::ExecuteCurrentCommandsAndRestoreState(state_tracker, false, true);
// Cache is now valid.
m_readback_buffer->InvalidateCPUCache();
m_valid = true;
}
} // namespace Vulkan
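For orientation, here is a hedged usage sketch of the flow this class implements; it is not part of this commit, and ExampleBBoxRoundTrip is a hypothetical caller. Set() stages a value in the CPU-visible readback buffer and flags it dirty, Flush() pushes dirty values into the GPU buffer via vkCmdUpdateBuffer, and after a draw invalidates the cache, the next Get() performs a blocking readback.

// Hypothetical caller (not part of this commit), assuming an initialized
// BoundingBox and StateTracker.
void ExampleBBoxRoundTrip(Vulkan::BoundingBox* bbox, Vulkan::StateTracker* state_tracker)
{
  // CPU-side write: cached and marked dirty, no GPU work yet.
  bbox->Set(state_tracker, 0, 128);
  // Upload any dirty values before the GPU consumes the buffer.
  bbox->Flush(state_tracker);
  // A draw that writes the buffer makes the CPU cache stale...
  bbox->Invalidate(state_tracker);
  // ...so this Get() copies GPU -> readback buffer and waits for it.
  s32 left = bbox->Get(state_tracker, 0);
  (void)left;
}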


@@ -0,0 +1,52 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <memory>
#include <string>
#include "Common/CommonTypes.h"
#include "VideoBackends/Vulkan/VulkanLoader.h"
namespace Vulkan
{
class StagingBuffer;
class StateTracker;
class BoundingBox
{
public:
BoundingBox();
~BoundingBox();
bool Initialize();
VkBuffer GetGPUBuffer() const { return m_gpu_buffer; }
VkDeviceSize GetGPUBufferOffset() const { return 0; }
VkDeviceSize GetGPUBufferSize() const { return BUFFER_SIZE; }
s32 Get(StateTracker* state_tracker, size_t index);
void Set(StateTracker* state_tracker, size_t index, s32 value);
void Invalidate(StateTracker* state_tracker);
void Flush(StateTracker* state_tracker);
private:
bool CreateGPUBuffer();
bool CreateReadbackBuffer();
void Readback(StateTracker* state_tracker);
VkBuffer m_gpu_buffer = VK_NULL_HANDLE;
VkDeviceMemory m_gpu_memory = VK_NULL_HANDLE;
static const size_t NUM_VALUES = 4;
static const size_t BUFFER_SIZE = sizeof(u32) * NUM_VALUES;
std::unique_ptr<StagingBuffer> m_readback_buffer;
std::array<bool, NUM_VALUES> m_values_dirty = {};
bool m_valid = true;
};
} // namespace Vulkan


@@ -0,0 +1,42 @@
set(SRCS
BoundingBox.cpp
CommandBufferManager.cpp
FramebufferManager.cpp
ObjectCache.cpp
PaletteTextureConverter.cpp
PerfQuery.cpp
RasterFont.cpp
Renderer.cpp
ShaderCompiler.cpp
StagingBuffer.cpp
StagingTexture2D.cpp
StateTracker.cpp
StreamBuffer.cpp
SwapChain.cpp
Texture2D.cpp
TextureCache.cpp
TextureEncoder.cpp
Util.cpp
VertexFormat.cpp
VertexManager.cpp
VulkanContext.cpp
VulkanLoader.cpp
main.cpp
)
set(LIBS
videocommon
common
)
# Only include the Vulkan headers when building the Vulkan backend
include_directories(${CMAKE_SOURCE_DIR}/Externals/Vulkan/Include)
# Silence warnings on glslang by flagging it as a system include
include_directories(SYSTEM ${CMAKE_SOURCE_DIR}/Externals/glslang/glslang/Public)
include_directories(SYSTEM ${CMAKE_SOURCE_DIR}/Externals/glslang/SPIRV)
# Link against glslang; the other necessary libraries are referenced by the executable.
add_dolphin_library(videovulkan "${SRCS}" "${LIBS}")
target_link_libraries(videovulkan glslang)


@@ -0,0 +1,457 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <algorithm>
#include <cstdint>
#include "Common/Assert.h"
#include "Common/CommonFuncs.h"
#include "Common/MsgHandler.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
namespace Vulkan
{
CommandBufferManager::CommandBufferManager(bool use_threaded_submission)
: m_submit_semaphore(1, 1), m_use_threaded_submission(use_threaded_submission)
{
}
CommandBufferManager::~CommandBufferManager()
{
// If the worker thread is enabled, wait for it to exit.
if (m_use_threaded_submission)
{
// Wait for all command buffers to be consumed by the worker thread.
m_submit_semaphore.Wait();
m_submit_loop->Stop();
m_submit_thread.join();
}
vkDeviceWaitIdle(g_vulkan_context->GetDevice());
DestroyCommandBuffers();
DestroyCommandPool();
}
bool CommandBufferManager::Initialize()
{
if (!CreateCommandPool())
return false;
if (!CreateCommandBuffers())
return false;
if (m_use_threaded_submission && !CreateSubmitThread())
return false;
return true;
}
bool CommandBufferManager::CreateCommandPool()
{
VkCommandPoolCreateInfo info = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, nullptr,
VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
g_vulkan_context->GetGraphicsQueueFamilyIndex()};
VkResult res =
vkCreateCommandPool(g_vulkan_context->GetDevice(), &info, nullptr, &m_command_pool);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateCommandPool failed: ");
return false;
}
return true;
}
void CommandBufferManager::DestroyCommandPool()
{
if (m_command_pool)
{
vkDestroyCommandPool(g_vulkan_context->GetDevice(), m_command_pool, nullptr);
m_command_pool = nullptr;
}
}
bool CommandBufferManager::CreateCommandBuffers()
{
VkDevice device = g_vulkan_context->GetDevice();
for (FrameResources& resources : m_frame_resources)
{
resources.needs_fence_wait = false;
VkCommandBufferAllocateInfo allocate_info = {
VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, nullptr, m_command_pool,
VK_COMMAND_BUFFER_LEVEL_PRIMARY, static_cast<uint32_t>(resources.command_buffers.size())};
VkResult res =
vkAllocateCommandBuffers(device, &allocate_info, resources.command_buffers.data());
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkAllocateCommandBuffers failed: ");
return false;
}
VkFenceCreateInfo fence_info = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr,
VK_FENCE_CREATE_SIGNALED_BIT};
res = vkCreateFence(device, &fence_info, nullptr, &resources.fence);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateFence failed: ");
return false;
}
// TODO: A better way to choose the number of descriptors.
VkDescriptorPoolSize pool_sizes[] = {{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 500000},
{VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 500000},
{VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 16},
{VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1024}};
VkDescriptorPoolCreateInfo pool_create_info = {VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
nullptr,
0,
100000, // tweak this
static_cast<u32>(ArraySize(pool_sizes)),
pool_sizes};
res = vkCreateDescriptorPool(device, &pool_create_info, nullptr, &resources.descriptor_pool);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateDescriptorPool failed: ");
return false;
}
}
// Activate the first command buffer. ActivateCommandBuffer moves forward, so start with the last
m_current_frame = m_frame_resources.size() - 1;
ActivateCommandBuffer();
return true;
}
void CommandBufferManager::DestroyCommandBuffers()
{
VkDevice device = g_vulkan_context->GetDevice();
for (FrameResources& resources : m_frame_resources)
{
for (const auto& it : resources.cleanup_resources)
it.destroy_callback(device, it.object);
resources.cleanup_resources.clear();
if (resources.fence != VK_NULL_HANDLE)
{
vkDestroyFence(device, resources.fence, nullptr);
resources.fence = VK_NULL_HANDLE;
}
if (resources.descriptor_pool != VK_NULL_HANDLE)
{
vkDestroyDescriptorPool(device, resources.descriptor_pool, nullptr);
resources.descriptor_pool = VK_NULL_HANDLE;
}
if (resources.command_buffers[0] != VK_NULL_HANDLE)
{
vkFreeCommandBuffers(device, m_command_pool,
static_cast<u32>(resources.command_buffers.size()),
resources.command_buffers.data());
resources.command_buffers.fill(VK_NULL_HANDLE);
}
}
}
VkDescriptorSet CommandBufferManager::AllocateDescriptorSet(VkDescriptorSetLayout set_layout)
{
VkDescriptorSetAllocateInfo allocate_info = {
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, nullptr,
m_frame_resources[m_current_frame].descriptor_pool, 1, &set_layout};
VkDescriptorSet descriptor_set;
VkResult res =
vkAllocateDescriptorSets(g_vulkan_context->GetDevice(), &allocate_info, &descriptor_set);
if (res != VK_SUCCESS)
{
// Failing to allocate a descriptor set is not a fatal error, we can
// recover by moving to the next command buffer.
return VK_NULL_HANDLE;
}
return descriptor_set;
}
bool CommandBufferManager::CreateSubmitThread()
{
m_submit_loop = std::make_unique<Common::BlockingLoop>();
m_submit_thread = std::thread([this]() {
m_submit_loop->Run([this]() {
PendingCommandBufferSubmit submit;
{
std::lock_guard<std::mutex> guard(m_pending_submit_lock);
if (m_pending_submits.empty())
{
m_submit_loop->AllowSleep();
return;
}
submit = m_pending_submits.front();
m_pending_submits.pop_front();
}
SubmitCommandBuffer(submit.index, submit.wait_semaphore, submit.signal_semaphore,
submit.present_swap_chain, submit.present_image_index);
});
});
return true;
}
void CommandBufferManager::PrepareToSubmitCommandBuffer()
{
// Grab the semaphore before submitting command buffer either on-thread or off-thread.
// This prevents a race from occurring where a second command buffer is executed
// before the worker thread has woken and executed the first one yet.
m_submit_semaphore.Wait();
}
void CommandBufferManager::WaitForWorkerThreadIdle()
{
// Drain the semaphore, then allow another request in the future.
m_submit_semaphore.Wait();
m_submit_semaphore.Post();
}
void CommandBufferManager::WaitForGPUIdle()
{
WaitForWorkerThreadIdle();
vkDeviceWaitIdle(g_vulkan_context->GetDevice());
}
void CommandBufferManager::WaitForFence(VkFence fence)
{
// Find the command buffer that this fence corresponds to.
size_t command_buffer_index = 0;
for (; command_buffer_index < m_frame_resources.size(); command_buffer_index++)
{
if (m_frame_resources[command_buffer_index].fence == fence)
break;
}
_assert_(command_buffer_index < m_frame_resources.size());
// Has this command buffer already been waited for?
if (!m_frame_resources[command_buffer_index].needs_fence_wait)
return;
// Wait for this command buffer to be completed.
VkResult res =
vkWaitForFences(g_vulkan_context->GetDevice(), 1,
&m_frame_resources[command_buffer_index].fence, VK_TRUE, UINT64_MAX);
if (res != VK_SUCCESS)
LOG_VULKAN_ERROR(res, "vkWaitForFences failed: ");
// Immediately fire callbacks and cleanups, since the commands have completed.
m_frame_resources[command_buffer_index].needs_fence_wait = false;
OnCommandBufferExecuted(command_buffer_index);
}
void CommandBufferManager::SubmitCommandBuffer(bool submit_on_worker_thread,
VkSemaphore wait_semaphore,
VkSemaphore signal_semaphore,
VkSwapchainKHR present_swap_chain,
uint32_t present_image_index)
{
FrameResources& resources = m_frame_resources[m_current_frame];
// Fire fence tracking callbacks. This can't happen on the worker thread.
// We invoke these before submitting so that any last-minute commands can be added.
for (const auto& iter : m_fence_point_callbacks)
iter.second.first(resources.command_buffers[1], resources.fence);
// End the current command buffer.
for (VkCommandBuffer command_buffer : resources.command_buffers)
{
VkResult res = vkEndCommandBuffer(command_buffer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkEndCommandBuffer failed: ");
PanicAlert("Failed to end command buffer");
}
}
// This command buffer now has commands, so can't be re-used without waiting.
resources.needs_fence_wait = true;
// Submitting off-thread?
if (m_use_threaded_submission && submit_on_worker_thread)
{
// Push to the pending submit queue.
{
std::lock_guard<std::mutex> guard(m_pending_submit_lock);
m_pending_submits.push_back({m_current_frame, wait_semaphore, signal_semaphore,
present_swap_chain, present_image_index});
}
// Wake up the worker thread for a single iteration.
m_submit_loop->Wakeup();
}
else
{
// Pass through to normal submission path.
SubmitCommandBuffer(m_current_frame, wait_semaphore, signal_semaphore, present_swap_chain,
present_image_index);
}
}
void CommandBufferManager::SubmitCommandBuffer(size_t index, VkSemaphore wait_semaphore,
VkSemaphore signal_semaphore,
VkSwapchainKHR present_swap_chain,
uint32_t present_image_index)
{
FrameResources& resources = m_frame_resources[index];
// This may be executed on the worker thread, so don't modify any state of the manager class.
uint32_t wait_bits = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
VkSubmitInfo submit_info = {VK_STRUCTURE_TYPE_SUBMIT_INFO,
nullptr,
0,
nullptr,
&wait_bits,
static_cast<u32>(resources.command_buffers.size()),
resources.command_buffers.data(),
0,
nullptr};
if (wait_semaphore != VK_NULL_HANDLE)
{
submit_info.pWaitSemaphores = &wait_semaphore;
submit_info.waitSemaphoreCount = 1;
}
if (signal_semaphore != VK_NULL_HANDLE)
{
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &signal_semaphore;
}
VkResult res =
vkQueueSubmit(g_vulkan_context->GetGraphicsQueue(), 1, &submit_info, resources.fence);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkQueueSubmit failed: ");
PanicAlert("Failed to submit command buffer.");
}
// Do we have a swap chain to present?
if (present_swap_chain != VK_NULL_HANDLE)
{
// Should have a signal semaphore.
_assert_(signal_semaphore != VK_NULL_HANDLE);
VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
nullptr,
1,
&signal_semaphore,
1,
&present_swap_chain,
&present_image_index,
nullptr};
res = vkQueuePresentKHR(g_vulkan_context->GetGraphicsQueue(), &present_info);
if (res != VK_SUCCESS && res != VK_ERROR_OUT_OF_DATE_KHR && res != VK_SUBOPTIMAL_KHR)
LOG_VULKAN_ERROR(res, "vkQueuePresentKHR failed: ");
}
// Command buffer has been queued, so permit the next one.
m_submit_semaphore.Post();
}
void CommandBufferManager::OnCommandBufferExecuted(size_t index)
{
FrameResources& resources = m_frame_resources[index];
// Fire fence tracking callbacks.
for (const auto& iter : m_fence_point_callbacks)
iter.second.second(resources.fence);
// Clean up all objects pending destruction on this command buffer
for (const auto& it : resources.cleanup_resources)
it.destroy_callback(g_vulkan_context->GetDevice(), it.object);
resources.cleanup_resources.clear();
}
void CommandBufferManager::ActivateCommandBuffer()
{
// Move to the next command buffer.
m_current_frame = (m_current_frame + 1) % NUM_COMMAND_BUFFERS;
FrameResources& resources = m_frame_resources[m_current_frame];
// Wait for the GPU to finish with all resources for this command buffer.
if (resources.needs_fence_wait)
{
VkResult res =
vkWaitForFences(g_vulkan_context->GetDevice(), 1, &resources.fence, true, UINT64_MAX);
if (res != VK_SUCCESS)
LOG_VULKAN_ERROR(res, "vkWaitForFences failed: ");
OnCommandBufferExecuted(m_current_frame);
}
// Reset fence to unsignaled before starting.
VkResult res = vkResetFences(g_vulkan_context->GetDevice(), 1, &resources.fence);
if (res != VK_SUCCESS)
LOG_VULKAN_ERROR(res, "vkResetFences failed: ");
// Reset command buffer to beginning since we can re-use the memory now
VkCommandBufferBeginInfo begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr,
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, nullptr};
for (VkCommandBuffer command_buffer : resources.command_buffers)
{
res = vkResetCommandBuffer(command_buffer, 0);
if (res != VK_SUCCESS)
LOG_VULKAN_ERROR(res, "vkResetCommandBuffer failed: ");
res = vkBeginCommandBuffer(command_buffer, &begin_info);
if (res != VK_SUCCESS)
LOG_VULKAN_ERROR(res, "vkBeginCommandBuffer failed: ");
}
// Reset the descriptor pool for this frame as well; its sets are no longer in use.
res = vkResetDescriptorPool(g_vulkan_context->GetDevice(), resources.descriptor_pool, 0);
if (res != VK_SUCCESS)
LOG_VULKAN_ERROR(res, "vkResetDescriptorPool failed: ");
}
void CommandBufferManager::ExecuteCommandBuffer(bool submit_off_thread, bool wait_for_completion)
{
VkFence pending_fence = GetCurrentCommandBufferFence();
// If we're waiting for completion, don't bother waking the worker thread.
PrepareToSubmitCommandBuffer();
SubmitCommandBuffer(submit_off_thread && !wait_for_completion);
ActivateCommandBuffer();
if (wait_for_completion)
WaitForFence(pending_fence);
}
void CommandBufferManager::AddFencePointCallback(
const void* key, const CommandBufferQueuedCallback& queued_callback,
const CommandBufferExecutedCallback& executed_callback)
{
// Shouldn't be adding twice.
_assert_(m_fence_point_callbacks.find(key) == m_fence_point_callbacks.end());
m_fence_point_callbacks.emplace(key, std::make_pair(queued_callback, executed_callback));
}
void CommandBufferManager::RemoveFencePointCallback(const void* key)
{
auto iter = m_fence_point_callbacks.find(key);
_assert_(iter != m_fence_point_callbacks.end());
m_fence_point_callbacks.erase(iter);
}
std::unique_ptr<CommandBufferManager> g_command_buffer_mgr;
} // namespace Vulkan
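As a usage sketch (not part of this commit; both functions are hypothetical and assumed to live inside namespace Vulkan), these are the two common ways this manager is driven:

// End of frame: hand the submit off to the worker thread and immediately
// continue recording into the next command buffer.
void ExampleEndOfFrame()
{
  g_command_buffer_mgr->ExecuteCommandBuffer(true, false);
}

// Before a CPU readback: submit on this thread and block until the GPU has
// executed everything recorded so far (waits on the pending fence).
void ExampleFlushAndWait()
{
  g_command_buffer_mgr->ExecuteCommandBuffer(false, true);
}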


@@ -0,0 +1,154 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <deque>
#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <thread>
#include <utility>
#include <vector>
#include "Common/BlockingLoop.h"
#include "Common/Semaphore.h"
#include "VideoCommon/VideoCommon.h"
#include "VideoBackends/Vulkan/Constants.h"
#include "VideoBackends/Vulkan/Util.h"
namespace Vulkan
{
class CommandBufferManager
{
public:
explicit CommandBufferManager(bool use_threaded_submission);
~CommandBufferManager();
bool Initialize();
VkCommandPool GetCommandPool() const { return m_command_pool; }
// These command buffers are allocated per-frame. They are valid until the command buffer
// is submitted; after that, call these functions again to get the new handles.
VkCommandBuffer GetCurrentInitCommandBuffer() const
{
return m_frame_resources[m_current_frame].command_buffers[0];
}
VkCommandBuffer GetCurrentCommandBuffer() const
{
return m_frame_resources[m_current_frame].command_buffers[1];
}
VkDescriptorPool GetCurrentDescriptorPool() const
{
return m_frame_resources[m_current_frame].descriptor_pool;
}
// Allocates a descriptor set from the pool reserved for the current frame.
VkDescriptorSet AllocateDescriptorSet(VkDescriptorSetLayout set_layout);
// Gets the fence that will be signaled when the currently recording command buffer has been
// submitted and executed. Do not wait on this fence until after the buffer has been submitted.
VkFence GetCurrentCommandBufferFence() const { return m_frame_resources[m_current_frame].fence; }
// Ensure the worker thread has submitted the previous frame's command buffer.
void PrepareToSubmitCommandBuffer();
// Ensure that the worker thread has submitted any previous command buffers and is idle.
void WaitForWorkerThreadIdle();
// Ensure that the worker thread has both submitted all commands, and the GPU has caught up.
// Use with caution, huge performance penalty.
void WaitForGPUIdle();
// Wait for a fence to be completed.
// Also invokes callbacks for completion.
void WaitForFence(VkFence fence);
void SubmitCommandBuffer(bool submit_on_worker_thread,
VkSemaphore wait_semaphore = VK_NULL_HANDLE,
VkSemaphore signal_semaphore = VK_NULL_HANDLE,
VkSwapchainKHR present_swap_chain = VK_NULL_HANDLE,
uint32_t present_image_index = 0xFFFFFFFF);
void ActivateCommandBuffer();
void ExecuteCommandBuffer(bool submit_off_thread, bool wait_for_completion);
// Schedule a Vulkan resource for destruction later on. This will occur when the command buffer
// is next re-used, and the GPU has finished working with the specified resource.
template <typename T>
void DeferResourceDestruction(T object)
{
DeferredResourceDestruction wrapper = DeferredResourceDestruction::Wrapper<T>(object);
m_frame_resources[m_current_frame].cleanup_resources.push_back(wrapper);
}
// Instruct the manager to fire the specified callbacks at fence points. The queued callback
// fires just before a command buffer is submitted (so last-minute commands can still be
// recorded); the executed callback fires once the buffer's fence has been waited on, meaning
// all commands recorded up to that point have completed on the GPU.
using CommandBufferQueuedCallback = std::function<void(VkCommandBuffer, VkFence)>;
using CommandBufferExecutedCallback = std::function<void(VkFence)>;
void AddFencePointCallback(const void* key, const CommandBufferQueuedCallback& queued_callback,
const CommandBufferExecutedCallback& executed_callback);
void RemoveFencePointCallback(const void* key);
private:
bool CreateCommandPool();
void DestroyCommandPool();
bool CreateCommandBuffers();
void DestroyCommandBuffers();
bool CreateSubmitThread();
void SubmitCommandBuffer(size_t index, VkSemaphore wait_semaphore, VkSemaphore signal_semaphore,
VkSwapchainKHR present_swap_chain, uint32_t present_image_index);
void OnCommandBufferExecuted(size_t index);
VkCommandPool m_command_pool = VK_NULL_HANDLE;
struct FrameResources
{
// [0] - Init (upload) command buffer, [1] - draw command buffer
std::array<VkCommandBuffer, 2> command_buffers;
VkDescriptorPool descriptor_pool;
VkFence fence;
bool needs_fence_wait;
std::vector<DeferredResourceDestruction> cleanup_resources;
};
std::array<FrameResources, NUM_COMMAND_BUFFERS> m_frame_resources = {};
size_t m_current_frame;
// callbacks when a fence point is set
std::map<const void*, std::pair<CommandBufferQueuedCallback, CommandBufferExecutedCallback>>
m_fence_point_callbacks;
// Threaded command buffer execution
// Semaphore determines when a command buffer can be queued
Common::Semaphore m_submit_semaphore;
std::thread m_submit_thread;
std::unique_ptr<Common::BlockingLoop> m_submit_loop;
struct PendingCommandBufferSubmit
{
size_t index;
VkSemaphore wait_semaphore;
VkSemaphore signal_semaphore;
VkSwapchainKHR present_swap_chain;
uint32_t present_image_index;
};
std::deque<PendingCommandBufferSubmit> m_pending_submits;
std::mutex m_pending_submit_lock;
bool m_use_threaded_submission = false;
};
extern std::unique_ptr<CommandBufferManager> g_command_buffer_mgr;
} // namespace Vulkan
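To make the deferred-destruction contract concrete, a minimal sketch, not part of this commit: ReplaceImageView is hypothetical, and it assumes DeferredResourceDestruction::Wrapper (from Util.h) has a specialization for the handle type being destroyed.

namespace Vulkan
{
// The old view may still be referenced by submitted-but-unexecuted command
// buffers, so it must not be destroyed immediately. Handing it to the manager
// defers destruction until this frame's fence has been waited on (see
// OnCommandBufferExecuted / ActivateCommandBuffer).
void ReplaceImageView(VkImageView& view, VkImageView new_view)
{
  if (view != VK_NULL_HANDLE)
    g_command_buffer_mgr->DeferResourceDestruction(view);
  view = new_view;
}
}  // namespace Vulkan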


@@ -0,0 +1,136 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include "Common/BitField.h"
#include "VideoBackends/Vulkan/VulkanLoader.h"
namespace Vulkan
{
// Number of command buffers. Having two allows one buffer to be
// executed whilst another is being built.
constexpr size_t NUM_COMMAND_BUFFERS = 2;
// Staging buffer usage - optimize for uploads or readbacks
enum STAGING_BUFFER_TYPE
{
STAGING_BUFFER_TYPE_UPLOAD,
STAGING_BUFFER_TYPE_READBACK
};
// Descriptor sets
enum DESCRIPTOR_SET
{
DESCRIPTOR_SET_UNIFORM_BUFFERS,
DESCRIPTOR_SET_PIXEL_SHADER_SAMPLERS,
DESCRIPTOR_SET_SHADER_STORAGE_BUFFERS,
NUM_DESCRIPTOR_SETS
};
// Uniform buffer bindings within the first descriptor set
enum UNIFORM_BUFFER_DESCRIPTOR_SET_BINDING
{
UBO_DESCRIPTOR_SET_BINDING_PS,
UBO_DESCRIPTOR_SET_BINDING_VS,
UBO_DESCRIPTOR_SET_BINDING_GS,
NUM_UBO_DESCRIPTOR_SET_BINDINGS
};
// Maximum number of attributes per vertex (we don't have any more than this?)
constexpr size_t MAX_VERTEX_ATTRIBUTES = 16;
// Number of pixel shader texture slots
constexpr size_t NUM_PIXEL_SHADER_SAMPLERS = 8;
// Total number of binding points in the pipeline layout
constexpr size_t TOTAL_PIPELINE_BINDING_POINTS =
NUM_UBO_DESCRIPTOR_SET_BINDINGS + NUM_PIXEL_SHADER_SAMPLERS + 1;
// Format of EFB textures
constexpr VkFormat EFB_COLOR_TEXTURE_FORMAT = VK_FORMAT_R8G8B8A8_UNORM;
constexpr VkFormat EFB_DEPTH_TEXTURE_FORMAT = VK_FORMAT_D32_SFLOAT;
constexpr VkFormat EFB_DEPTH_AS_COLOR_TEXTURE_FORMAT = VK_FORMAT_R32_SFLOAT;
// Format of texturecache textures
constexpr VkFormat TEXTURECACHE_TEXTURE_FORMAT = VK_FORMAT_R8G8B8A8_UNORM;
// Textures that don't fit into this buffer will be uploaded with a separate buffer (see below).
constexpr size_t INITIAL_TEXTURE_UPLOAD_BUFFER_SIZE = 16 * 1024 * 1024;
constexpr size_t MAXIMUM_TEXTURE_UPLOAD_BUFFER_SIZE = 64 * 1024 * 1024;
// Textures greater than 1024*1024 will be put in staging textures that are released after
// execution instead. A 2048x2048 texture is 16MB, and we'd only fit four of these in our
// streaming buffer and be blocking frequently. Games are unlikely to have textures this
// large anyway, so it's only really an issue for HD texture packs, and memory is not
// a limiting factor in these scenarios anyway.
constexpr size_t STAGING_TEXTURE_UPLOAD_THRESHOLD = 1024 * 1024 * 4;
// Streaming uniform buffer size
constexpr size_t INITIAL_UNIFORM_STREAM_BUFFER_SIZE = 16 * 1024 * 1024;
constexpr size_t MAXIMUM_UNIFORM_STREAM_BUFFER_SIZE = 32 * 1024 * 1024;
// Push constant buffer size for utility shaders
constexpr u32 PUSH_CONSTANT_BUFFER_SIZE = 128;
// Rasterization state info
union RasterizationState {
BitField<0, 2, VkCullModeFlags> cull_mode;
BitField<2, 7, VkSampleCountFlagBits> samples;
BitField<9, 1, VkBool32> per_sample_shading;
BitField<10, 1, VkBool32> depth_clamp;
u32 bits;
};
// Depth state info
union DepthStencilState {
BitField<0, 1, VkBool32> test_enable;
BitField<1, 1, VkBool32> write_enable;
BitField<2, 3, VkCompareOp> compare_op;
u32 bits;
};
// Blend state info
union BlendState {
struct
{
union {
BitField<0, 1, VkBool32> blend_enable;
BitField<1, 3, VkBlendOp> blend_op;
BitField<4, 5, VkBlendFactor> src_blend;
BitField<9, 5, VkBlendFactor> dst_blend;
BitField<14, 3, VkBlendOp> alpha_blend_op;
BitField<17, 5, VkBlendFactor> src_alpha_blend;
BitField<22, 5, VkBlendFactor> dst_alpha_blend;
BitField<27, 4, VkColorComponentFlags> write_mask;
u32 low_bits;
};
union {
BitField<0, 1, VkBool32> logic_op_enable;
BitField<1, 4, VkLogicOp> logic_op;
u32 high_bits;
};
};
u64 bits;
};
// Sampler info
union SamplerState {
BitField<0, 1, VkFilter> min_filter;
BitField<1, 1, VkFilter> mag_filter;
BitField<2, 1, VkSamplerMipmapMode> mipmap_mode;
BitField<3, 2, VkSamplerAddressMode> wrap_u;
BitField<5, 2, VkSamplerAddressMode> wrap_v;
BitField<7, 8, u32> min_lod;
BitField<15, 8, u32> max_lod;
BitField<23, 6, s32> lod_bias; // tm0.lod_bias (8 bits) / 32 gives us 0-7.
BitField<29, 3, u32> anisotropy; // max_anisotropy = 1 << anisotropy, max of 16, so range 0-4.
u32 bits;
};
} // namespace Vulkan
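These unions exist so render state can be hashed and compared as plain integers when building pipeline cache keys (see PipelineInfo later in this commit). A hedged sketch of filling one, not part of this commit, assuming Dolphin's BitField supports direct assignment:

namespace Vulkan
{
// Pack a basic less-or-equal depth test into a single u32 key.
u32 ExampleDepthStencilKey()
{
  DepthStencilState state = {};  // zero everything so unused bits are deterministic
  state.test_enable = VK_TRUE;
  state.write_enable = VK_TRUE;
  state.compare_op = VK_COMPARE_OP_LESS_OR_EQUAL;  // value 3, fits the 3-bit field
  return state.bits;
}
}  // namespace Vulkan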

File diff suppressed because it is too large.


@@ -0,0 +1,170 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include "VideoBackends/Vulkan/Constants.h"
#include "VideoCommon/FramebufferManagerBase.h"
namespace Vulkan
{
class StagingTexture2D;
class StateTracker;
class StreamBuffer;
class Texture2D;
class VertexFormat;
class XFBSource : public XFBSourceBase
{
void DecodeToTexture(u32 xfb_addr, u32 fb_width, u32 fb_height) override {}
void CopyEFB(float gamma) override {}
};
class FramebufferManager : public FramebufferManagerBase
{
public:
FramebufferManager();
~FramebufferManager();
bool Initialize();
VkRenderPass GetEFBRenderPass() const { return m_efb_render_pass; }
u32 GetEFBWidth() const { return m_efb_width; }
u32 GetEFBHeight() const { return m_efb_height; }
u32 GetEFBLayers() const { return m_efb_layers; }
VkSampleCountFlagBits GetEFBSamples() const { return m_efb_samples; }
Texture2D* GetEFBColorTexture() const { return m_efb_color_texture.get(); }
Texture2D* GetEFBDepthTexture() const { return m_efb_depth_texture.get(); }
VkFramebuffer GetEFBFramebuffer() const { return m_efb_framebuffer; }
void GetTargetSize(unsigned int* width, unsigned int* height) override;
std::unique_ptr<XFBSourceBase> CreateXFBSource(unsigned int target_width,
unsigned int target_height,
unsigned int layers) override
{
return std::make_unique<XFBSource>();
}
void CopyToRealXFB(u32 xfb_addr, u32 fb_stride, u32 fb_height, const EFBRectangle& source_rc,
float gamma = 1.0f) override
{
}
void ResizeEFBTextures();
// Recompile shaders, use when MSAA mode changes.
void RecreateRenderPass();
void RecompileShaders();
// Reinterpret pixel format of EFB color texture.
// Assumes no render pass is currently in progress.
// Swaps EFB framebuffers, so re-bind afterwards.
void ReinterpretPixelData(int convtype);
// Resolve color/depth textures to a non-msaa texture, and return it.
Texture2D* ResolveEFBColorTexture(StateTracker* state_tracker, const VkRect2D& region);
Texture2D* ResolveEFBDepthTexture(StateTracker* state_tracker, const VkRect2D& region);
// Reads a framebuffer value back from the GPU. This may block if the cache is not current.
u32 PeekEFBColor(StateTracker* state_tracker, u32 x, u32 y);
float PeekEFBDepth(StateTracker* state_tracker, u32 x, u32 y);
void InvalidatePeekCache();
// Writes a value to the framebuffer. This will never block, and writes will be batched.
void PokeEFBColor(StateTracker* state_tracker, u32 x, u32 y, u32 color);
void PokeEFBDepth(StateTracker* state_tracker, u32 x, u32 y, float depth);
void FlushEFBPokes(StateTracker* state_tracker);
private:
struct EFBPokeVertex
{
float position[4];
u32 color;
};
bool CreateEFBRenderPass();
void DestroyEFBRenderPass();
bool CreateEFBFramebuffer();
void DestroyEFBFramebuffer();
bool CompileConversionShaders();
void DestroyConversionShaders();
bool CreateReadbackRenderPasses();
void DestroyReadbackRenderPasses();
bool CompileReadbackShaders();
void DestroyReadbackShaders();
bool CreateReadbackTextures();
void DestroyReadbackTextures();
bool CreateReadbackFramebuffer();
void DestroyReadbackFramebuffer();
void CreatePokeVertexFormat();
bool CreatePokeVertexBuffer();
void DestroyPokeVertexBuffer();
bool CompilePokeShaders();
void DestroyPokeShaders();
bool PopulateColorReadbackTexture(StateTracker* state_tracker);
bool PopulateDepthReadbackTexture(StateTracker* state_tracker);
void CreatePokeVertices(std::vector<EFBPokeVertex>* destination_list, u32 x, u32 y, float z,
u32 color);
void DrawPokeVertices(StateTracker* state_tracker, const EFBPokeVertex* vertices,
size_t vertex_count, bool write_color, bool write_depth);
VkRenderPass m_efb_render_pass = VK_NULL_HANDLE;
VkRenderPass m_depth_resolve_render_pass = VK_NULL_HANDLE;
u32 m_efb_width = 0;
u32 m_efb_height = 0;
u32 m_efb_layers = 1;
VkSampleCountFlagBits m_efb_samples = VK_SAMPLE_COUNT_1_BIT;
std::unique_ptr<Texture2D> m_efb_color_texture;
std::unique_ptr<Texture2D> m_efb_convert_color_texture;
std::unique_ptr<Texture2D> m_efb_depth_texture;
std::unique_ptr<Texture2D> m_efb_resolve_color_texture;
std::unique_ptr<Texture2D> m_efb_resolve_depth_texture;
VkFramebuffer m_efb_framebuffer = VK_NULL_HANDLE;
VkFramebuffer m_efb_convert_framebuffer = VK_NULL_HANDLE;
VkFramebuffer m_depth_resolve_framebuffer = VK_NULL_HANDLE;
// Format conversion shaders
VkShaderModule m_ps_rgb8_to_rgba6 = VK_NULL_HANDLE;
VkShaderModule m_ps_rgba6_to_rgb8 = VK_NULL_HANDLE;
VkShaderModule m_ps_depth_resolve = VK_NULL_HANDLE;
// EFB readback texture
std::unique_ptr<Texture2D> m_color_copy_texture;
std::unique_ptr<Texture2D> m_depth_copy_texture;
VkFramebuffer m_color_copy_framebuffer = VK_NULL_HANDLE;
VkFramebuffer m_depth_copy_framebuffer = VK_NULL_HANDLE;
// CPU-side EFB readback texture
std::unique_ptr<StagingTexture2D> m_color_readback_texture;
std::unique_ptr<StagingTexture2D> m_depth_readback_texture;
bool m_color_readback_texture_valid = false;
bool m_depth_readback_texture_valid = false;
// EFB poke drawing setup
std::unique_ptr<VertexFormat> m_poke_vertex_format;
std::unique_ptr<StreamBuffer> m_poke_vertex_stream_buffer;
std::vector<EFBPokeVertex> m_color_poke_vertices;
std::vector<EFBPokeVertex> m_depth_poke_vertices;
VkPrimitiveTopology m_poke_primitive_topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
VkRenderPass m_copy_color_render_pass = VK_NULL_HANDLE;
VkRenderPass m_copy_depth_render_pass = VK_NULL_HANDLE;
VkShaderModule m_copy_color_shader = VK_NULL_HANDLE;
VkShaderModule m_copy_depth_shader = VK_NULL_HANDLE;
VkShaderModule m_poke_vertex_shader = VK_NULL_HANDLE;
VkShaderModule m_poke_geometry_shader = VK_NULL_HANDLE;
VkShaderModule m_poke_fragment_shader = VK_NULL_HANDLE;
};
} // namespace Vulkan
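For orientation, a hedged sketch of the CPU-side EFB access paths declared above; it is not part of this commit, and ExampleEFBAccess with its coordinates is purely illustrative:

namespace Vulkan
{
void ExampleEFBAccess(FramebufferManager* fb_mgr, StateTracker* state_tracker)
{
  // Peek: served from the cached readback texture; the first access after an
  // invalidation repopulates it with a blocking GPU copy.
  u32 color = fb_mgr->PeekEFBColor(state_tracker, 100, 50);
  // Poke: batched into a vertex list, never blocks...
  fb_mgr->PokeEFBColor(state_tracker, 100, 50, color | 0xFF000000u);
  // ...until the whole batch is drawn in one pass.
  fb_mgr->FlushEFBPokes(state_tracker);
}
}  // namespace Vulkan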

File diff suppressed because it is too large.


@@ -0,0 +1,189 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include "Common/LinearDiskCache.h"
#include "VideoBackends/Vulkan/Constants.h"
#include "VideoCommon/GeometryShaderGen.h"
#include "VideoCommon/PixelShaderGen.h"
#include "VideoCommon/VertexShaderGen.h"
namespace Vulkan
{
class CommandBufferManager;
class VertexFormat;
class StreamBuffer;
struct PipelineInfo
{
// These are packed in descending order of size, to avoid any padding so that the structure
// can be copied/compared as a single block of memory. 64-bit pointer size is assumed.
const VertexFormat* vertex_format;
VkPipelineLayout pipeline_layout;
VkShaderModule vs;
VkShaderModule gs;
VkShaderModule ps;
VkRenderPass render_pass;
BlendState blend_state;
RasterizationState rasterization_state;
DepthStencilState depth_stencil_state;
VkPrimitiveTopology primitive_topology;
};
struct PipelineInfoHash
{
std::size_t operator()(const PipelineInfo& key) const;
};
bool operator==(const PipelineInfo& lhs, const PipelineInfo& rhs);
bool operator!=(const PipelineInfo& lhs, const PipelineInfo& rhs);
bool operator<(const PipelineInfo& lhs, const PipelineInfo& rhs);
bool operator>(const PipelineInfo& lhs, const PipelineInfo& rhs);
bool operator==(const SamplerState& lhs, const SamplerState& rhs);
bool operator!=(const SamplerState& lhs, const SamplerState& rhs);
bool operator>(const SamplerState& lhs, const SamplerState& rhs);
bool operator<(const SamplerState& lhs, const SamplerState& rhs);
class ObjectCache
{
public:
ObjectCache();
~ObjectCache();
// We have three shared pipeline layouts:
// - Standard
// - Per-stage UBO (VS/GS/PS, VS constants accessible from PS)
// - 8 combined image samplers (accessible from PS)
// - BBox Enabled
// - Same as standard, plus a single SSBO accessible from PS
// - Push Constant
// - Same as standard, plus 128 bytes of push constants, accessible from all stages.
//
// All three pipeline layouts use the same descriptor set layouts, but the final descriptor set
// (SSBO) is only required when using the BBox Enabled pipeline layout.
//
VkDescriptorSetLayout GetDescriptorSetLayout(DESCRIPTOR_SET set) const
{
return m_descriptor_set_layouts[set];
}
VkPipelineLayout GetStandardPipelineLayout() const { return m_standard_pipeline_layout; }
VkPipelineLayout GetBBoxPipelineLayout() const { return m_bbox_pipeline_layout; }
VkPipelineLayout GetPushConstantPipelineLayout() const { return m_push_constant_pipeline_layout; }
// Shared utility shader resources
VertexFormat* GetUtilityShaderVertexFormat() const
{
return m_utility_shader_vertex_format.get();
}
StreamBuffer* GetUtilityShaderVertexBuffer() const
{
return m_utility_shader_vertex_buffer.get();
}
StreamBuffer* GetUtilityShaderUniformBuffer() const
{
return m_utility_shader_uniform_buffer.get();
}
// Get utility shader header based on current config.
std::string GetUtilityShaderHeader() const;
// Accesses ShaderGen shader caches
VkShaderModule GetVertexShaderForUid(const VertexShaderUid& uid);
VkShaderModule GetGeometryShaderForUid(const GeometryShaderUid& uid);
VkShaderModule GetPixelShaderForUid(const PixelShaderUid& uid, DSTALPHA_MODE dstalpha_mode);
// Static samplers
VkSampler GetPointSampler() const { return m_point_sampler; }
VkSampler GetLinearSampler() const { return m_linear_sampler; }
VkSampler GetSampler(const SamplerState& info);
// Performed at startup: creates descriptor layouts and compiles all static shaders.
bool Initialize();
// Finds a pipeline by the specified description; if not found, attempts to create it.
VkPipeline GetPipeline(const PipelineInfo& info);
// Wipes out the pipeline cache, use when MSAA modes change, for example
// Also destroys the data that would be stored in the disk cache.
void ClearPipelineCache();
// Saves the pipeline cache to disk. Call when shutting down.
void SavePipelineCache();
// Clear sampler cache, use when anisotropy mode changes
// WARNING: Ensure none of the objects from here are in use when calling
void ClearSamplerCache();
// Recompile shared shaders, call when stereo mode changes.
void RecompileSharedShaders();
// Shared shader accessors
VkShaderModule GetScreenQuadVertexShader() const { return m_screen_quad_vertex_shader; }
VkShaderModule GetPassthroughVertexShader() const { return m_passthrough_vertex_shader; }
VkShaderModule GetScreenQuadGeometryShader() const { return m_screen_quad_geometry_shader; }
VkShaderModule GetPassthroughGeometryShader() const { return m_passthrough_geometry_shader; }
private:
bool CreatePipelineCache(bool load_from_disk);
void DestroyPipelineCache();
void LoadShaderCaches();
void DestroyShaderCaches();
bool CreateDescriptorSetLayouts();
void DestroyDescriptorSetLayouts();
bool CreatePipelineLayouts();
void DestroyPipelineLayouts();
bool CreateUtilityShaderVertexFormat();
bool CreateStaticSamplers();
bool CompileSharedShaders();
void DestroySharedShaders();
void DestroySamplers();
std::string GetDiskCacheFileName(const char* type);
std::array<VkDescriptorSetLayout, NUM_DESCRIPTOR_SETS> m_descriptor_set_layouts = {};
VkPipelineLayout m_standard_pipeline_layout = VK_NULL_HANDLE;
VkPipelineLayout m_bbox_pipeline_layout = VK_NULL_HANDLE;
VkPipelineLayout m_push_constant_pipeline_layout = VK_NULL_HANDLE;
std::unique_ptr<VertexFormat> m_utility_shader_vertex_format;
std::unique_ptr<StreamBuffer> m_utility_shader_vertex_buffer;
std::unique_ptr<StreamBuffer> m_utility_shader_uniform_buffer;
template <typename Uid>
struct ShaderCache
{
std::map<Uid, VkShaderModule> shader_map;
LinearDiskCache<Uid, u32> disk_cache;
};
ShaderCache<VertexShaderUid> m_vs_cache;
ShaderCache<GeometryShaderUid> m_gs_cache;
ShaderCache<PixelShaderUid> m_ps_cache;
std::unordered_map<PipelineInfo, VkPipeline, PipelineInfoHash> m_pipeline_objects;
VkPipelineCache m_pipeline_cache = VK_NULL_HANDLE;
std::string m_pipeline_cache_filename;
VkSampler m_point_sampler = VK_NULL_HANDLE;
VkSampler m_linear_sampler = VK_NULL_HANDLE;
std::map<SamplerState, VkSampler> m_sampler_cache;
// Utility/shared shaders
VkShaderModule m_screen_quad_vertex_shader = VK_NULL_HANDLE;
VkShaderModule m_passthrough_vertex_shader = VK_NULL_HANDLE;
VkShaderModule m_screen_quad_geometry_shader = VK_NULL_HANDLE;
VkShaderModule m_passthrough_geometry_shader = VK_NULL_HANDLE;
};
extern std::unique_ptr<ObjectCache> g_object_cache;
} // namespace Vulkan
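A hedged sketch of assembling a pipeline key and fetching it from this cache; it is not part of this commit, ExampleGetQuadPipeline and its parameters are hypothetical, and the blend/rasterization/depth state is left zeroed for brevity:

namespace Vulkan
{
VkPipeline ExampleGetQuadPipeline(VkRenderPass render_pass, VkShaderModule ps)
{
  // Zero-initialize: PipelineInfo is hashed and compared as a raw block of
  // memory, so every unused byte must be deterministic.
  PipelineInfo info = {};
  info.vertex_format = g_object_cache->GetUtilityShaderVertexFormat();
  info.pipeline_layout = g_object_cache->GetStandardPipelineLayout();
  info.vs = g_object_cache->GetScreenQuadVertexShader();
  info.gs = VK_NULL_HANDLE;
  info.ps = ps;
  info.render_pass = render_pass;
  info.primitive_topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
  // Only the first lookup with these exact contents pays for pipeline creation.
  return g_object_cache->GetPipeline(info);
}
}  // namespace Vulkan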


@@ -0,0 +1,311 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <algorithm>
#include <cstring>
#include "Common/Assert.h"
#include "Common/CommonFuncs.h"
#include "Common/Logging/Log.h"
#include "Common/MsgHandler.h"
#include "Common/StringUtil.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/FramebufferManager.h"
#include "VideoBackends/Vulkan/ObjectCache.h"
#include "VideoBackends/Vulkan/PaletteTextureConverter.h"
#include "VideoBackends/Vulkan/Renderer.h"
#include "VideoBackends/Vulkan/StateTracker.h"
#include "VideoBackends/Vulkan/StreamBuffer.h"
#include "VideoBackends/Vulkan/Texture2D.h"
#include "VideoBackends/Vulkan/Util.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
namespace Vulkan
{
PaletteTextureConverter::PaletteTextureConverter()
{
}
PaletteTextureConverter::~PaletteTextureConverter()
{
for (const auto& it : m_shaders)
{
if (it != VK_NULL_HANDLE)
vkDestroyShaderModule(g_vulkan_context->GetDevice(), it, nullptr);
}
if (m_palette_buffer_view != VK_NULL_HANDLE)
vkDestroyBufferView(g_vulkan_context->GetDevice(), m_palette_buffer_view, nullptr);
if (m_palette_set_layout != VK_NULL_HANDLE)
vkDestroyDescriptorSetLayout(g_vulkan_context->GetDevice(), m_palette_set_layout, nullptr);
}
bool PaletteTextureConverter::Initialize()
{
if (!CreateBuffers())
return false;
if (!CompileShaders())
return false;
if (!CreateDescriptorLayout())
return false;
return true;
}
void PaletteTextureConverter::ConvertTexture(StateTracker* state_tracker, VkRenderPass render_pass,
VkFramebuffer dst_framebuffer, Texture2D* src_texture,
u32 width, u32 height, void* palette,
TlutFormat format)
{
struct PSUniformBlock
{
float multiplier;
int texel_buffer_offset;
int pad[2];
};
_assert_(static_cast<size_t>(format) < NUM_PALETTE_CONVERSION_SHADERS);
size_t palette_size = ((format & 0xF) == GX_TF_I4) ? 32 : 512;
VkDescriptorSet texel_buffer_descriptor_set;
// Allocate memory for the palette, and descriptor sets for the buffer.
// If any of these fail, execute a command buffer, and try again.
if (!m_palette_stream_buffer->ReserveMemory(palette_size,
g_vulkan_context->GetTexelBufferAlignment()) ||
(texel_buffer_descriptor_set =
g_command_buffer_mgr->AllocateDescriptorSet(m_palette_set_layout)) == VK_NULL_HANDLE)
{
WARN_LOG(VIDEO, "Executing command list while waiting for space in palette buffer");
Util::ExecuteCurrentCommandsAndRestoreState(state_tracker, false);
if (!m_palette_stream_buffer->ReserveMemory(palette_size,
g_vulkan_context->GetTexelBufferAlignment()) ||
(texel_buffer_descriptor_set =
g_command_buffer_mgr->AllocateDescriptorSet(m_palette_set_layout)) == VK_NULL_HANDLE)
{
PanicAlert("Failed to allocate space for texture conversion");
return;
}
}
// Fill descriptor set #2 (texel buffer)
u32 palette_offset = static_cast<u32>(m_palette_stream_buffer->GetCurrentOffset());
VkWriteDescriptorSet texel_set_write = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
nullptr,
texel_buffer_descriptor_set,
0,
0,
1,
VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
nullptr,
nullptr,
&m_palette_buffer_view};
vkUpdateDescriptorSets(g_vulkan_context->GetDevice(), 1, &texel_set_write, 0, nullptr);
Util::BufferMemoryBarrier(g_command_buffer_mgr->GetCurrentInitCommandBuffer(),
m_palette_stream_buffer->GetBuffer(), VK_ACCESS_HOST_WRITE_BIT,
VK_ACCESS_SHADER_READ_BIT, palette_offset, palette_size,
VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
// Set up draw
UtilityShaderDraw draw(g_command_buffer_mgr->GetCurrentInitCommandBuffer(), m_pipeline_layout,
render_pass, g_object_cache->GetScreenQuadVertexShader(), VK_NULL_HANDLE,
m_shaders[format]);
VkRect2D region = {{0, 0}, {width, height}};
draw.BeginRenderPass(dst_framebuffer, region);
// Copy in palette
memcpy(m_palette_stream_buffer->GetCurrentHostPointer(), palette, palette_size);
m_palette_stream_buffer->CommitMemory(palette_size);
// PS Uniforms/Samplers
PSUniformBlock uniforms = {};
uniforms.multiplier = ((format & 0xF) == GX_TF_I4) ? 15.0f : 255.0f;
uniforms.texel_buffer_offset = static_cast<int>(palette_offset / sizeof(u16));
draw.SetPushConstants(&uniforms, sizeof(uniforms));
draw.SetPSSampler(0, src_texture->GetView(), g_object_cache->GetPointSampler());
// We have to bind the texel buffer descriptor set separately.
vkCmdBindDescriptorSets(g_command_buffer_mgr->GetCurrentInitCommandBuffer(),
VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline_layout, 0, 1,
&texel_buffer_descriptor_set, 0, nullptr);
// Draw
draw.SetViewportAndScissor(0, 0, width, height);
draw.DrawWithoutVertexBuffer(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, 4);
draw.EndRenderPass();
}
bool PaletteTextureConverter::CreateBuffers()
{
// TODO: Check against maximum size
static const size_t BUFFER_SIZE = 1024 * 1024;
m_palette_stream_buffer =
StreamBuffer::Create(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, BUFFER_SIZE, BUFFER_SIZE);
if (!m_palette_stream_buffer)
return false;
// Create a view of the whole buffer, we'll offset our texel load into it
VkBufferViewCreateInfo view_info = {
VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
0, // VkBufferViewCreateFlags flags
m_palette_stream_buffer->GetBuffer(), // VkBuffer buffer
VK_FORMAT_R16_UINT, // VkFormat format
0, // VkDeviceSize offset
BUFFER_SIZE // VkDeviceSize range
};
VkResult res = vkCreateBufferView(g_vulkan_context->GetDevice(), &view_info, nullptr,
&m_palette_buffer_view);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBufferView failed: ");
return false;
}
return true;
}
bool PaletteTextureConverter::CompileShaders()
{
static const char PALETTE_CONVERSION_FRAGMENT_SHADER_SOURCE[] = R"(
layout(std140, push_constant) uniform PCBlock
{
float multiplier;
int texture_buffer_offset;
} PC;
layout(set = 1, binding = 0) uniform sampler2DArray samp0;
layout(set = 0, binding = 0) uniform usamplerBuffer samp1;
layout(location = 0) in vec3 f_uv0;
layout(location = 0) out vec4 ocol0;
int Convert3To8(int v)
{
// Swizzle bits: 00000123 -> 12312312
return (v << 5) | (v << 2) | (v >> 1);
}
int Convert4To8(int v)
{
// Swizzle bits: 00001234 -> 12341234
return (v << 4) | v;
}
int Convert5To8(int v)
{
// Swizzle bits: 00012345 -> 12345123
return (v << 3) | (v >> 2);
}
int Convert6To8(int v)
{
// Swizzle bits: 00123456 -> 12345612
return (v << 2) | (v >> 4);
}
float4 DecodePixel_RGB5A3(int val)
{
int r,g,b,a;
if ((val&0x8000) > 0)
{
r=Convert5To8((val>>10) & 0x1f);
g=Convert5To8((val>>5 ) & 0x1f);
b=Convert5To8((val ) & 0x1f);
a=0xFF;
}
else
{
a=Convert3To8((val>>12) & 0x7);
r=Convert4To8((val>>8 ) & 0xf);
g=Convert4To8((val>>4 ) & 0xf);
b=Convert4To8((val ) & 0xf);
}
return float4(r, g, b, a) / 255.0;
}
float4 DecodePixel_RGB565(int val)
{
int r, g, b, a;
r = Convert5To8((val >> 11) & 0x1f);
g = Convert6To8((val >> 5) & 0x3f);
b = Convert5To8((val) & 0x1f);
a = 0xFF;
return float4(r, g, b, a) / 255.0;
}
float4 DecodePixel_IA8(int val)
{
int i = val & 0xFF;
int a = val >> 8;
return float4(i, i, i, a) / 255.0;
}
void main()
{
int src = int(round(texture(samp0, f_uv0).r * PC.multiplier));
src = int(texelFetch(samp1, src + PC.texture_buffer_offset).r);
// Palette entries are stored big-endian, so byte-swap the 16-bit value.
src = ((src << 8) & 0xFF00) | (src >> 8);
ocol0 = DECODE(src);
}
)";
std::string palette_ia8_program = StringFromFormat("%s\n%s", "#define DECODE DecodePixel_IA8",
PALETTE_CONVERSION_FRAGMENT_SHADER_SOURCE);
std::string palette_rgb565_program = StringFromFormat(
"%s\n%s", "#define DECODE DecodePixel_RGB565", PALETTE_CONVERSION_FRAGMENT_SHADER_SOURCE);
std::string palette_rgb5a3_program = StringFromFormat(
"%s\n%s", "#define DECODE DecodePixel_RGB5A3", PALETTE_CONVERSION_FRAGMENT_SHADER_SOURCE);
m_shaders[GX_TL_IA8] = Util::CompileAndCreateFragmentShader(palette_ia8_program);
m_shaders[GX_TL_RGB565] = Util::CompileAndCreateFragmentShader(palette_rgb565_program);
m_shaders[GX_TL_RGB5A3] = Util::CompileAndCreateFragmentShader(palette_rgb5a3_program);
return (m_shaders[GX_TL_IA8] != VK_NULL_HANDLE && m_shaders[GX_TL_RGB565] != VK_NULL_HANDLE &&
m_shaders[GX_TL_RGB5A3] != VK_NULL_HANDLE);
}
bool PaletteTextureConverter::CreateDescriptorLayout()
{
static const VkDescriptorSetLayoutBinding set_bindings[] = {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT},
};
static const VkDescriptorSetLayoutCreateInfo set_info = {
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, nullptr, 0,
static_cast<u32>(ArraySize(set_bindings)), set_bindings};
VkResult res = vkCreateDescriptorSetLayout(g_vulkan_context->GetDevice(), &set_info, nullptr,
&m_palette_set_layout);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateDescriptorSetLayout failed: ");
return false;
}
VkDescriptorSetLayout sets[] = {m_palette_set_layout, g_object_cache->GetDescriptorSetLayout(
DESCRIPTOR_SET_PIXEL_SHADER_SAMPLERS)};
VkPushConstantRange push_constant_range = {
VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, PUSH_CONSTANT_BUFFER_SIZE};
VkPipelineLayoutCreateInfo pipeline_layout_info = {VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
nullptr,
0,
static_cast<u32>(ArraySize(sets)),
sets,
1,
&push_constant_range};
res = vkCreatePipelineLayout(g_vulkan_context->GetDevice(), &pipeline_layout_info, nullptr,
&m_pipeline_layout);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineLayout failed: ");
return false;
}
return true;
}
} // namespace Vulkan


@@ -0,0 +1,50 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <memory>
#include "VideoBackends/Vulkan/StreamBuffer.h"
#include "VideoCommon/TextureDecoder.h"
namespace Vulkan
{
class StateTracker;
class Texture2D;
// Since this converter uses a uniform texel buffer, we can't use the general pipeline generators.
class PaletteTextureConverter
{
public:
PaletteTextureConverter();
~PaletteTextureConverter();
bool Initialize();
void ConvertTexture(StateTracker* state_tracker, VkRenderPass render_pass,
VkFramebuffer dst_framebuffer, Texture2D* src_texture, u32 width, u32 height,
void* palette, TlutFormat format);
private:
static const size_t NUM_PALETTE_CONVERSION_SHADERS = 3;
bool CreateBuffers();
bool CompileShaders();
bool CreateDescriptorLayout();
VkDescriptorSetLayout m_palette_set_layout = VK_NULL_HANDLE;
VkPipelineLayout m_pipeline_layout = VK_NULL_HANDLE;
std::array<VkShaderModule, NUM_PALETTE_CONVERSION_SHADERS> m_shaders = {};
std::unique_ptr<StreamBuffer> m_palette_stream_buffer;
VkBufferView m_palette_buffer_view = VK_NULL_HANDLE;
std::unique_ptr<StreamBuffer> m_uniform_buffer;
};
} // namespace Vulkan


@@ -0,0 +1,370 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <algorithm>
#include <cstring>
#include <functional>
#include "Common/Assert.h"
#include "Common/CommonFuncs.h"
#include "Common/Logging/Log.h"
#include "Common/MsgHandler.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/PerfQuery.h"
#include "VideoBackends/Vulkan/Renderer.h"
#include "VideoBackends/Vulkan/StagingBuffer.h"
#include "VideoBackends/Vulkan/StateTracker.h"
#include "VideoBackends/Vulkan/Util.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
namespace Vulkan
{
PerfQuery::PerfQuery()
{
}
PerfQuery::~PerfQuery()
{
g_command_buffer_mgr->RemoveFencePointCallback(this);
if (m_query_pool != VK_NULL_HANDLE)
vkDestroyQueryPool(g_vulkan_context->GetDevice(), m_query_pool, nullptr);
}
bool PerfQuery::Initialize(StateTracker* state_tracker)
{
m_state_tracker = state_tracker;
if (!CreateQueryPool())
{
PanicAlert("Failed to create query pool");
return false;
}
if (!CreateReadbackBuffer())
{
PanicAlert("Failed to create readback buffer");
return false;
}
g_command_buffer_mgr->AddFencePointCallback(
this, std::bind(&PerfQuery::OnCommandBufferQueued, this, std::placeholders::_1,
std::placeholders::_2),
std::bind(&PerfQuery::OnCommandBufferExecuted, this, std::placeholders::_1));
return true;
}
void PerfQuery::EnableQuery(PerfQueryGroup type)
{
// Have we used half of the query buffer already?
if (m_query_count > m_query_buffer.size() / 2)
NonBlockingPartialFlush();
// Block if there are no free slots.
if (m_query_count == PERF_QUERY_BUFFER_SIZE)
{
// ERROR_LOG(VIDEO, "Flushed query buffer early!");
BlockingPartialFlush();
}
if (type == PQG_ZCOMP_ZCOMPLOC || type == PQG_ZCOMP)
{
u32 index = (m_query_read_pos + m_query_count) % PERF_QUERY_BUFFER_SIZE;
ActiveQuery& entry = m_query_buffer[index];
_assert_(!entry.active && !entry.available);
entry.active = true;
m_query_count++;
DEBUG_LOG(VIDEO, "start query %u", index);
// Use precise queries if supported, otherwise boolean (which will be incorrect).
VkQueryControlFlags flags = 0;
if (g_vulkan_context->SupportsPreciseOcclusionQueries())
flags = VK_QUERY_CONTROL_PRECISE_BIT;
// Ensure the query starts within a render pass.
// TODO: Is this needed?
m_state_tracker->BeginRenderPass();
vkCmdBeginQuery(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_query_pool, index, flags);
// Prevent background command buffer submission while the query is active.
m_state_tracker->SetBackgroundCommandBufferExecution(false);
}
}
void PerfQuery::DisableQuery(PerfQueryGroup type)
{
if (type == PQG_ZCOMP_ZCOMPLOC || type == PQG_ZCOMP)
{
// DisableQuery should be called for each EnableQuery, so subtract one to get the previous one.
u32 index = (m_query_read_pos + m_query_count - 1) % PERF_QUERY_BUFFER_SIZE;
vkCmdEndQuery(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_query_pool, index);
m_state_tracker->SetBackgroundCommandBufferExecution(true);
DEBUG_LOG(VIDEO, "end query %u", index);
}
}
void PerfQuery::ResetQuery()
{
m_query_count = 0;
m_query_read_pos = 0;
std::fill_n(m_results, ArraySize(m_results), 0);
// Reset entire query pool, ensuring all queries are ready to write to.
m_state_tracker->EndRenderPass();
vkCmdResetQueryPool(g_command_buffer_mgr->GetCurrentCommandBuffer(), m_query_pool, 0,
PERF_QUERY_BUFFER_SIZE);
for (auto& entry : m_query_buffer)
{
entry.pending_fence = VK_NULL_HANDLE;
entry.available = false;
entry.active = false;
}
}
u32 PerfQuery::GetQueryResult(PerfQueryType type)
{
u32 result = 0;
if (type == PQ_ZCOMP_INPUT_ZCOMPLOC || type == PQ_ZCOMP_OUTPUT_ZCOMPLOC)
{
result = m_results[PQG_ZCOMP_ZCOMPLOC];
}
else if (type == PQ_ZCOMP_INPUT || type == PQ_ZCOMP_OUTPUT)
{
result = m_results[PQG_ZCOMP];
}
else if (type == PQ_BLEND_INPUT)
{
result = m_results[PQG_ZCOMP] + m_results[PQG_ZCOMP_ZCOMPLOC];
}
else if (type == PQ_EFB_COPY_CLOCKS)
{
result = m_results[PQG_EFB_COPY_CLOCKS];
}
return result / 4;
}
void PerfQuery::FlushResults()
{
while (!IsFlushed())
BlockingPartialFlush();
}
bool PerfQuery::IsFlushed() const
{
return m_query_count == 0;
}
bool PerfQuery::CreateQueryPool()
{
VkQueryPoolCreateInfo info = {
VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
0, // VkQueryPoolCreateFlags flags
VK_QUERY_TYPE_OCCLUSION, // VkQueryType queryType
PERF_QUERY_BUFFER_SIZE, // uint32_t queryCount
0 // VkQueryPipelineStatisticFlags pipelineStatistics;
};
VkResult res = vkCreateQueryPool(g_vulkan_context->GetDevice(), &info, nullptr, &m_query_pool);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateQueryPool failed: ");
return false;
}
return true;
}
bool PerfQuery::CreateReadbackBuffer()
{
m_readback_buffer = StagingBuffer::Create(STAGING_BUFFER_TYPE_READBACK,
PERF_QUERY_BUFFER_SIZE * sizeof(PerfQueryDataType),
VK_BUFFER_USAGE_TRANSFER_DST_BIT);
// Leave the buffer persistently mapped, we invalidate it when we need to read.
if (!m_readback_buffer || !m_readback_buffer->Map())
return false;
return true;
}
void PerfQuery::QueueCopyQueryResults(VkCommandBuffer command_buffer, VkFence fence,
u32 start_index, u32 query_count)
{
DEBUG_LOG(VIDEO, "queue copy of queries %u-%u", start_index, start_index + query_count - 1);
// Transition buffer for GPU write
// TODO: Is this needed?
m_readback_buffer->PrepareForGPUWrite(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT);
// Copy from queries -> buffer
vkCmdCopyQueryPoolResults(command_buffer, m_query_pool, start_index, query_count,
m_readback_buffer->GetBuffer(), start_index * sizeof(PerfQueryDataType),
sizeof(PerfQueryDataType), VK_QUERY_RESULT_WAIT_BIT);
// Prepare for host readback
m_readback_buffer->FlushGPUCache(command_buffer, VK_ACCESS_TRANSFER_WRITE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT);
// Reset queries so they're ready to use again
vkCmdResetQueryPool(command_buffer, m_query_pool, start_index, query_count);
// Flag all queries as available, but with a fence that has to be completed first
for (u32 i = 0; i < query_count; i++)
{
u32 index = start_index + i;
ActiveQuery& entry = m_query_buffer[index];
entry.pending_fence = fence;
entry.available = true;
entry.active = false;
}
}
void PerfQuery::OnCommandBufferQueued(VkCommandBuffer command_buffer, VkFence fence)
{
// Flag all pending queries that aren't available as available after execution.
u32 copy_start_index = 0;
u32 copy_count = 0;
for (u32 i = 0; i < m_query_count; i++)
{
u32 index = (m_query_read_pos + i) % PERF_QUERY_BUFFER_SIZE;
ActiveQuery& entry = m_query_buffer[index];
// Skip already-copied queries (will happen if a flush hasn't occurred and
// a command buffer hasn't finished executing).
if (entry.available)
{
// These should be grouped together, and at the start.
_assert_(copy_count == 0);
continue;
}
// If this wrapped around, we need to flush the entries before the end of the buffer.
_assert_(entry.active);
if (index < copy_start_index)
{
QueueCopyQueryResults(command_buffer, fence, copy_start_index, copy_count);
copy_start_index = index;
copy_count = 0;
}
else if (copy_count == 0)
{
copy_start_index = index;
}
copy_count++;
}
if (copy_count > 0)
QueueCopyQueryResults(command_buffer, fence, copy_start_index, copy_count);
}
void PerfQuery::OnCommandBufferExecuted(VkFence fence)
{
// Need to save these since ProcessResults will modify them.
u32 query_read_pos = m_query_read_pos;
u32 query_count = m_query_count;
// Flush as many queries as are bound to this fence.
u32 flush_start_index = 0;
u32 flush_count = 0;
for (u32 i = 0; i < query_count; i++)
{
u32 index = (query_read_pos + i) % PERF_QUERY_BUFFER_SIZE;
if (m_query_buffer[index].pending_fence != fence)
{
// These should be grouped together, at the end.
break;
}
// If this wrapped around, we need to flush the entries before the end of the buffer.
if (index < flush_start_index)
{
ProcessResults(flush_start_index, flush_count);
flush_start_index = index;
flush_count = 0;
}
else if (flush_count == 0)
{
flush_start_index = index;
}
flush_count++;
}
if (flush_count > 0)
ProcessResults(flush_start_index, flush_count);
}
void PerfQuery::ProcessResults(u32 start_index, u32 query_count)
{
// Invalidate CPU caches before reading back.
m_readback_buffer->InvalidateCPUCache(start_index * sizeof(PerfQueryDataType),
query_count * sizeof(PerfQueryDataType));
// At most m_query_count queries should be pending at this point.
_assert_(query_count <= m_query_count);
DEBUG_LOG(VIDEO, "process queries %u-%u", start_index, start_index + query_count - 1);
// Remove pending queries.
for (u32 i = 0; i < query_count; i++)
{
u32 index = (m_query_read_pos + i) % PERF_QUERY_BUFFER_SIZE;
ActiveQuery& entry = m_query_buffer[index];
// Should have a fence associated with it (waiting for a result).
_assert_(entry.pending_fence != VK_NULL_HANDLE);
entry.pending_fence = VK_NULL_HANDLE;
entry.available = false;
entry.active = false;
// Grab result from readback buffer, it will already have been invalidated.
u32 result;
m_readback_buffer->Read(index * sizeof(PerfQueryDataType), &result, sizeof(result), false);
DEBUG_LOG(VIDEO, " query result %u", result);
// NOTE: Reported pixel metrics should be referenced to native resolution
m_results[entry.query_type] +=
static_cast<u32>(static_cast<u64>(result) * EFB_WIDTH / g_renderer->GetTargetWidth() *
EFB_HEIGHT / g_renderer->GetTargetHeight());
}
m_query_read_pos = (m_query_read_pos + query_count) % PERF_QUERY_BUFFER_SIZE;
m_query_count -= query_count;
}
void PerfQuery::NonBlockingPartialFlush()
{
if (IsFlushed())
return;
// Submit a command buffer in the background if the front query is not bound to one.
// Ideally this will complete before the buffer fills.
if (m_query_buffer[m_query_read_pos].pending_fence == VK_NULL_HANDLE)
Util::ExecuteCurrentCommandsAndRestoreState(m_state_tracker, true, false);
}
void PerfQuery::BlockingPartialFlush()
{
if (IsFlushed())
return;
// If the first pending query still needs its command buffer submitted, do that now.
ActiveQuery& entry = m_query_buffer[m_query_read_pos];
if (entry.pending_fence == VK_NULL_HANDLE)
{
// This will callback OnCommandBufferQueued which will set the fence on the entry.
// We wait for completion, which will also call OnCommandBufferExecuted, and clear the fence.
Util::ExecuteCurrentCommandsAndRestoreState(m_state_tracker, false, true);
}
else
{
// The command buffer has been submitted, but is awaiting completion.
// Wait for the fence to complete, which will call OnCommandBufferExecuted.
g_command_buffer_mgr->WaitForFence(entry.pending_fence);
}
}
} // namespace Vulkan
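
The two fence callbacks above share one pattern: walk the ring buffer from the read position and emit one call per contiguous index span, splitting at the wrap point. A minimal standalone sketch of that pattern (simplified: the real OnCommandBufferQueued also skips entries already flagged available):

// Standalone sketch of the ring-buffer walk used by OnCommandBufferQueued and
// OnCommandBufferExecuted: emit one contiguous [start, start + count) span per
// callback, splitting whenever the index wraps past the end of the buffer.
template <typename SpanCallback>
static void ForEachContiguousSpan(u32 read_pos, u32 pending, u32 buffer_size,
                                  SpanCallback callback)
{
  u32 start = read_pos;
  u32 count = 0;
  for (u32 i = 0; i < pending; i++)
  {
    u32 index = (read_pos + i) % buffer_size;
    if (index < start)
    {
      // Wrapped around; flush the span that ended at the buffer's end.
      callback(start, count);
      start = index;
      count = 0;
    }
    count++;
  }
  if (count > 0)
    callback(start, count);
}

// e.g. ForEachContiguousSpan(m_query_read_pos, m_query_count, PERF_QUERY_BUFFER_SIZE,
//                            [&](u32 s, u32 c) { QueueCopyQueryResults(cb, fence, s, c); });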

View File

@ -0,0 +1,70 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <memory>
#include "VideoBackends/Vulkan/Constants.h"
#include "VideoCommon/PerfQueryBase.h"
namespace Vulkan
{
class StagingBuffer;
class StateTracker;
class PerfQuery : public PerfQueryBase
{
public:
PerfQuery();
~PerfQuery();
bool Initialize(StateTracker* state_tracker);
void EnableQuery(PerfQueryGroup type) override;
void DisableQuery(PerfQueryGroup type) override;
void ResetQuery() override;
u32 GetQueryResult(PerfQueryType type) override;
void FlushResults() override;
bool IsFlushed() const override;
private:
struct ActiveQuery
{
PerfQueryType query_type;
VkFence pending_fence;
bool available;
bool active;
};
bool CreateQueryPool();
bool CreateReadbackBuffer();
void QueueCopyQueryResults(VkCommandBuffer command_buffer, VkFence fence, u32 start_index,
u32 query_count);
void ProcessResults(u32 start_index, u32 query_count);
void OnCommandBufferQueued(VkCommandBuffer command_buffer, VkFence fence);
void OnCommandBufferExecuted(VkFence fence);
void NonBlockingPartialFlush();
void BlockingPartialFlush();
StateTracker* m_state_tracker = nullptr;
// TODO: This should be size_t, but the base class uses u32s
using PerfQueryDataType = u32;
// When testing in SMS, 64 entries were too small and 128 were enough; 512 gives headroom.
static const u32 PERF_QUERY_BUFFER_SIZE = 512;
std::array<ActiveQuery, PERF_QUERY_BUFFER_SIZE> m_query_buffer = {};
u32 m_query_read_pos = 0;
// TODO: Investigate using pipeline statistics to implement other query types
VkQueryPool m_query_pool = VK_NULL_HANDLE;
// Buffer containing query results. Each query is a u32.
std::unique_ptr<StagingBuffer> m_readback_buffer;
};
} // namespace Vulkan

View File

@ -0,0 +1,408 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <vector>
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/RasterFont.h"
#include "VideoBackends/Vulkan/Texture2D.h"
#include "VideoBackends/Vulkan/Util.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
// Based on OGL RasterFont
// TODO: We should move this to common.
namespace Vulkan
{
constexpr int CHAR_WIDTH = 8;
constexpr int CHAR_HEIGHT = 13;
constexpr int CHAR_OFFSET = 32;
constexpr int CHAR_COUNT = 95;
static const u8 rasters[CHAR_COUNT][CHAR_HEIGHT] = {
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36},
{0x00, 0x00, 0x00, 0x66, 0x66, 0xff, 0x66, 0x66, 0xff, 0x66, 0x66, 0x00, 0x00},
{0x00, 0x00, 0x18, 0x7e, 0xff, 0x1b, 0x1f, 0x7e, 0xf8, 0xd8, 0xff, 0x7e, 0x18},
{0x00, 0x00, 0x0e, 0x1b, 0xdb, 0x6e, 0x30, 0x18, 0x0c, 0x76, 0xdb, 0xd8, 0x70},
{0x00, 0x00, 0x7f, 0xc6, 0xcf, 0xd8, 0x70, 0x70, 0xd8, 0xcc, 0xcc, 0x6c, 0x38},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x1c, 0x0c, 0x0e},
{0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x18, 0x0c},
{0x00, 0x00, 0x30, 0x18, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30},
{0x00, 0x00, 0x00, 0x00, 0x99, 0x5a, 0x3c, 0xff, 0x3c, 0x5a, 0x99, 0x00, 0x00},
{0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0xff, 0xff, 0x18, 0x18, 0x18, 0x00, 0x00},
{0x00, 0x00, 0x30, 0x18, 0x1c, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x00, 0x38, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x60, 0x60, 0x30, 0x30, 0x18, 0x18, 0x0c, 0x0c, 0x06, 0x06, 0x03, 0x03},
{0x00, 0x00, 0x3c, 0x66, 0xc3, 0xe3, 0xf3, 0xdb, 0xcf, 0xc7, 0xc3, 0x66, 0x3c},
{0x00, 0x00, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x78, 0x38, 0x18},
{0x00, 0x00, 0xff, 0xc0, 0xc0, 0x60, 0x30, 0x18, 0x0c, 0x06, 0x03, 0xe7, 0x7e},
{0x00, 0x00, 0x7e, 0xe7, 0x03, 0x03, 0x07, 0x7e, 0x07, 0x03, 0x03, 0xe7, 0x7e},
{0x00, 0x00, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0xff, 0xcc, 0x6c, 0x3c, 0x1c, 0x0c},
{0x00, 0x00, 0x7e, 0xe7, 0x03, 0x03, 0x07, 0xfe, 0xc0, 0xc0, 0xc0, 0xc0, 0xff},
{0x00, 0x00, 0x7e, 0xe7, 0xc3, 0xc3, 0xc7, 0xfe, 0xc0, 0xc0, 0xc0, 0xe7, 0x7e},
{0x00, 0x00, 0x30, 0x30, 0x30, 0x30, 0x18, 0x0c, 0x06, 0x03, 0x03, 0x03, 0xff},
{0x00, 0x00, 0x7e, 0xe7, 0xc3, 0xc3, 0xe7, 0x7e, 0xe7, 0xc3, 0xc3, 0xe7, 0x7e},
{0x00, 0x00, 0x7e, 0xe7, 0x03, 0x03, 0x03, 0x7f, 0xe7, 0xc3, 0xc3, 0xe7, 0x7e},
{0x00, 0x00, 0x00, 0x38, 0x38, 0x00, 0x00, 0x38, 0x38, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x30, 0x18, 0x1c, 0x1c, 0x00, 0x00, 0x1c, 0x1c, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x60, 0x30, 0x18, 0x0c, 0x06},
{0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x60, 0x30, 0x18, 0x0c, 0x06, 0x03, 0x06, 0x0c, 0x18, 0x30, 0x60},
{0x00, 0x00, 0x18, 0x00, 0x00, 0x18, 0x18, 0x0c, 0x06, 0x03, 0xc3, 0xc3, 0x7e},
{0x00, 0x00, 0x3f, 0x60, 0xcf, 0xdb, 0xd3, 0xdd, 0xc3, 0x7e, 0x00, 0x00, 0x00},
{0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xff, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18},
{0x00, 0x00, 0xfe, 0xc7, 0xc3, 0xc3, 0xc7, 0xfe, 0xc7, 0xc3, 0xc3, 0xc7, 0xfe},
{0x00, 0x00, 0x7e, 0xe7, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xe7, 0x7e},
{0x00, 0x00, 0xfc, 0xce, 0xc7, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc7, 0xce, 0xfc},
{0x00, 0x00, 0xff, 0xc0, 0xc0, 0xc0, 0xc0, 0xfc, 0xc0, 0xc0, 0xc0, 0xc0, 0xff},
{0x00, 0x00, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xfc, 0xc0, 0xc0, 0xc0, 0xff},
{0x00, 0x00, 0x7e, 0xe7, 0xc3, 0xc3, 0xcf, 0xc0, 0xc0, 0xc0, 0xc0, 0xe7, 0x7e},
{0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xff, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3},
{0x00, 0x00, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x7e},
{0x00, 0x00, 0x7c, 0xee, 0xc6, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06},
{0x00, 0x00, 0xc3, 0xc6, 0xcc, 0xd8, 0xf0, 0xe0, 0xf0, 0xd8, 0xcc, 0xc6, 0xc3},
{0x00, 0x00, 0xff, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0},
{0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xff, 0xff, 0xe7, 0xc3},
{0x00, 0x00, 0xc7, 0xc7, 0xcf, 0xcf, 0xdf, 0xdb, 0xfb, 0xf3, 0xf3, 0xe3, 0xe3},
{0x00, 0x00, 0x7e, 0xe7, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xe7, 0x7e},
{0x00, 0x00, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xfe, 0xc7, 0xc3, 0xc3, 0xc7, 0xfe},
{0x00, 0x00, 0x3f, 0x6e, 0xdf, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c},
{0x00, 0x00, 0xc3, 0xc6, 0xcc, 0xd8, 0xf0, 0xfe, 0xc7, 0xc3, 0xc3, 0xc7, 0xfe},
{0x00, 0x00, 0x7e, 0xe7, 0x03, 0x03, 0x07, 0x7e, 0xe0, 0xc0, 0xc0, 0xe7, 0x7e},
{0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff},
{0x00, 0x00, 0x7e, 0xe7, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3},
{0x00, 0x00, 0x18, 0x3c, 0x3c, 0x66, 0x66, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3},
{0x00, 0x00, 0xc3, 0xe7, 0xff, 0xff, 0xdb, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3},
{0x00, 0x00, 0xc3, 0x66, 0x66, 0x3c, 0x3c, 0x18, 0x3c, 0x3c, 0x66, 0x66, 0xc3},
{0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x66, 0x66, 0xc3},
{0x00, 0x00, 0xff, 0xc0, 0xc0, 0x60, 0x30, 0x7e, 0x0c, 0x06, 0x03, 0x03, 0xff},
{0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x3c},
{0x00, 0x03, 0x03, 0x06, 0x06, 0x0c, 0x0c, 0x18, 0x18, 0x30, 0x30, 0x60, 0x60},
{0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18},
{0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x30, 0x70},
{0x00, 0x00, 0x7f, 0xc3, 0xc3, 0x7f, 0x03, 0xc3, 0x7e, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0xfe, 0xc3, 0xc3, 0xc3, 0xc3, 0xfe, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0},
{0x00, 0x00, 0x7e, 0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x7f, 0xc3, 0xc3, 0xc3, 0xc3, 0x7f, 0x03, 0x03, 0x03, 0x03, 0x03},
{0x00, 0x00, 0x7f, 0xc0, 0xc0, 0xfe, 0xc3, 0xc3, 0x7e, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x30, 0x30, 0x30, 0x30, 0x30, 0xfc, 0x30, 0x30, 0x30, 0x33, 0x1e},
{0x7e, 0xc3, 0x03, 0x03, 0x7f, 0xc3, 0xc3, 0xc3, 0x7e, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xfe, 0xc0, 0xc0, 0xc0, 0xc0},
{0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x18, 0x00},
{0x38, 0x6c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x00, 0x00, 0x0c, 0x00},
{0x00, 0x00, 0xc6, 0xcc, 0xf8, 0xf0, 0xd8, 0xcc, 0xc6, 0xc0, 0xc0, 0xc0, 0xc0},
{0x00, 0x00, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x78},
{0x00, 0x00, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xfe, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xfc, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00},
{0xc0, 0xc0, 0xc0, 0xfe, 0xc3, 0xc3, 0xc3, 0xc3, 0xfe, 0x00, 0x00, 0x00, 0x00},
{0x03, 0x03, 0x03, 0x7f, 0xc3, 0xc3, 0xc3, 0xc3, 0x7f, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xe0, 0xfe, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0xfe, 0x03, 0x03, 0x7e, 0xc0, 0xc0, 0x7f, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x1c, 0x36, 0x30, 0x30, 0x30, 0x30, 0xfc, 0x30, 0x30, 0x30, 0x00},
{0x00, 0x00, 0x7e, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x18, 0x3c, 0x3c, 0x66, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0xc3, 0xe7, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3, 0x00, 0x00, 0x00, 0x00},
{0xc0, 0x60, 0x60, 0x30, 0x18, 0x3c, 0x66, 0x66, 0xc3, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0xff, 0x60, 0x30, 0x18, 0x0c, 0x06, 0xff, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x0f, 0x18, 0x18, 0x18, 0x38, 0xf0, 0x38, 0x18, 0x18, 0x18, 0x0f},
{0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18},
{0x00, 0x00, 0xf0, 0x18, 0x18, 0x18, 0x1c, 0x0f, 0x1c, 0x18, 0x18, 0x18, 0xf0},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x8f, 0xf1, 0x60, 0x00, 0x00, 0x00}};
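// Layout note: each entry above is one glyph, stored as CHAR_HEIGHT row bytes
// whose most significant bit maps to the leftmost pixel; see the
// (1 << (CHAR_WIDTH - x - 1)) test in CreateTexture() below, which decodes the
// bitmap into RGBA8 texels.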
static const char VERTEX_SHADER_SOURCE[] = R"(
layout(std140, push_constant) uniform PCBlock {
vec2 char_size;
vec2 offset;
vec4 color;
} PC;
layout(location = 0) in vec4 ipos;
layout(location = 5) in vec4 icol0;
layout(location = 8) in vec3 itex0;
layout(location = 0) out vec2 uv0;
void main()
{
gl_Position = vec4(ipos.xy + PC.offset, 0.0f, 1.0f);
gl_Position.y = -gl_Position.y;
uv0 = itex0.xy * PC.char_size;
}
)";
static const char FRAGMENT_SHADER_SOURCE[] = R"(
layout(std140, push_constant) uniform PCBlock {
vec2 char_size;
vec2 offset;
vec4 color;
} PC;
layout(set = 1, binding = 0) uniform sampler2D samp0;
layout(location = 0) in vec2 uv0;
layout(location = 0) out vec4 ocol0;
void main()
{
ocol0 = texture(samp0, uv0) * PC.color;
}
)";
RasterFont::RasterFont()
{
}
RasterFont::~RasterFont()
{
if (m_vertex_shader != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_vertex_shader);
if (m_fragment_shader != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_fragment_shader);
}
bool RasterFont::Initialize()
{
// Create shaders and texture
if (!CreateShaders() || !CreateTexture())
return false;
return true;
}
bool RasterFont::CreateTexture()
{
// generate the texture
std::vector<u32> texture_data(CHAR_WIDTH * CHAR_COUNT * CHAR_HEIGHT);
for (int y = 0; y < CHAR_HEIGHT; y++)
{
for (int c = 0; c < CHAR_COUNT; c++)
{
for (int x = 0; x < CHAR_WIDTH; x++)
{
bool pixel = (0 != (rasters[c][y] & (1 << (CHAR_WIDTH - x - 1))));
texture_data[CHAR_WIDTH * CHAR_COUNT * y + CHAR_WIDTH * c + x] = pixel ? 0xFFFFFFFFu : 0u;
}
}
}
// create the actual texture object
m_texture =
Texture2D::Create(CHAR_WIDTH * CHAR_COUNT, CHAR_HEIGHT, 1, 1, VK_FORMAT_R8G8B8A8_UNORM,
VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
if (!m_texture)
return false;
// create temporary buffer for uploading texture
VkBufferCreateInfo buffer_info = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
nullptr,
0,
static_cast<VkDeviceSize>(texture_data.size() * sizeof(u32)),
VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr};
VkBuffer temp_buffer;
VkResult res = vkCreateBuffer(g_vulkan_context->GetDevice(), &buffer_info, nullptr, &temp_buffer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: ");
return false;
}
VkMemoryRequirements memory_requirements;
vkGetBufferMemoryRequirements(g_vulkan_context->GetDevice(), temp_buffer, &memory_requirements);
uint32_t memory_type_index = g_vulkan_context->GetMemoryType(memory_requirements.memoryTypeBits,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
VkMemoryAllocateInfo memory_allocate_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, nullptr,
memory_requirements.size, memory_type_index};
VkDeviceMemory temp_buffer_memory;
res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr,
&temp_buffer_memory);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), temp_buffer, nullptr);
return false;
}
// Bind buffer to memory
res = vkBindBufferMemory(g_vulkan_context->GetDevice(), temp_buffer, temp_buffer_memory, 0);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkBindBufferMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), temp_buffer, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), temp_buffer_memory, nullptr);
return false;
}
// Copy into buffer
void* mapped_ptr;
res = vkMapMemory(g_vulkan_context->GetDevice(), temp_buffer_memory, 0, buffer_info.size, 0,
&mapped_ptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkMapMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), temp_buffer, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), temp_buffer_memory, nullptr);
return false;
}
// Copy texture data into staging buffer
memcpy(mapped_ptr, texture_data.data(), texture_data.size() * sizeof(u32));
vkUnmapMemory(g_vulkan_context->GetDevice(), temp_buffer_memory);
// Copy from staging buffer to the final texture
VkBufferImageCopy region = {0, CHAR_WIDTH * CHAR_COUNT,
CHAR_HEIGHT, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1},
{0, 0, 0}, {CHAR_WIDTH * CHAR_COUNT, CHAR_HEIGHT, 1}};
m_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentInitCommandBuffer(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vkCmdCopyBufferToImage(g_command_buffer_mgr->GetCurrentInitCommandBuffer(), temp_buffer,
m_texture->GetImage(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
// Free temp buffers after command buffer executes
m_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentInitCommandBuffer(),
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
g_command_buffer_mgr->DeferResourceDestruction(temp_buffer);
g_command_buffer_mgr->DeferResourceDestruction(temp_buffer_memory);
return true;
}
bool RasterFont::CreateShaders()
{
m_vertex_shader = Util::CompileAndCreateVertexShader(VERTEX_SHADER_SOURCE);
m_fragment_shader = Util::CompileAndCreateFragmentShader(FRAGMENT_SHADER_SOURCE);
return m_vertex_shader != VK_NULL_HANDLE && m_fragment_shader != VK_NULL_HANDLE;
}
void RasterFont::PrintMultiLineText(VkRenderPass render_pass, const std::string& text,
float start_x, float start_y, u32 bbWidth, u32 bbHeight,
u32 color)
{
// skip empty strings
if (text.empty())
return;
UtilityShaderDraw draw(g_command_buffer_mgr->GetCurrentCommandBuffer(),
g_object_cache->GetPushConstantPipelineLayout(), render_pass,
m_vertex_shader, VK_NULL_HANDLE, m_fragment_shader);
UtilityShaderVertex* vertices =
draw.ReserveVertices(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, text.length() * 6);
size_t num_vertices = 0;
if (!vertices)
return;
float delta_x = float(2 * CHAR_WIDTH) / float(bbWidth);
float delta_y = float(2 * CHAR_HEIGHT) / float(bbHeight);
float border_x = 2.0f / float(bbWidth);
float border_y = 4.0f / float(bbHeight);
float x = float(start_x);
float y = float(start_y);
for (const char& c : text)
{
if (c == '\n')
{
x = float(start_x);
y -= delta_y + border_y;
continue;
}
// do not print spaces, they can be skipped easily
if (c == ' ')
{
x += delta_x + border_x;
continue;
}
if (c < CHAR_OFFSET || c >= CHAR_COUNT + CHAR_OFFSET)
continue;
vertices[num_vertices].SetPosition(x, y);
vertices[num_vertices].SetTextureCoordinates(static_cast<float>(c - CHAR_OFFSET), 0.0f);
num_vertices++;
vertices[num_vertices].SetPosition(x + delta_x, y);
vertices[num_vertices].SetTextureCoordinates(static_cast<float>(c - CHAR_OFFSET + 1), 0.0f);
num_vertices++;
vertices[num_vertices].SetPosition(x + delta_x, y + delta_y);
vertices[num_vertices].SetTextureCoordinates(static_cast<float>(c - CHAR_OFFSET + 1), 1.0f);
num_vertices++;
vertices[num_vertices].SetPosition(x, y);
vertices[num_vertices].SetTextureCoordinates(static_cast<float>(c - CHAR_OFFSET), 0.0f);
num_vertices++;
vertices[num_vertices].SetPosition(x + delta_x, y + delta_y);
vertices[num_vertices].SetTextureCoordinates(static_cast<float>(c - CHAR_OFFSET + 1), 1.0f);
num_vertices++;
vertices[num_vertices].SetPosition(x, y + delta_y);
vertices[num_vertices].SetTextureCoordinates(static_cast<float>(c - CHAR_OFFSET), 1.0f);
num_vertices++;
x += delta_x + border_x;
}
// Nothing to draw if the string contained only whitespace or unprintable characters.
if (num_vertices == 0)
return;
draw.CommitVertices(num_vertices);
struct PCBlock
{
float char_size[2];
float offset[2];
float color[4];
} pc_block = {};
pc_block.char_size[0] = 1.0f / static_cast<float>(CHAR_COUNT);
pc_block.char_size[1] = 1.0f;
// shadows
pc_block.offset[0] = 2.0f / bbWidth;
pc_block.offset[1] = -2.0f / bbHeight;
pc_block.color[3] = (color >> 24) / 255.0f;
draw.SetPushConstants(&pc_block, sizeof(pc_block));
draw.SetPSSampler(0, m_texture->GetView(), g_object_cache->GetLinearSampler());
// Set up alpha blending
BlendState blend_state = Util::GetNoBlendingBlendState();
blend_state.blend_enable = VK_TRUE;
blend_state.src_blend = VK_BLEND_FACTOR_SRC_ALPHA;
blend_state.blend_op = VK_BLEND_OP_ADD;
blend_state.dst_blend = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
draw.SetBlendState(blend_state);
draw.Draw();
// non-shadowed part
pc_block.offset[0] = 0.0f;
pc_block.offset[1] = 0.0f;
pc_block.color[0] = ((color >> 16) & 0xFF) / 255.0f;
pc_block.color[1] = ((color >> 8) & 0xFF) / 255.0f;
pc_block.color[2] = (color & 0xFF) / 255.0f;
draw.SetPushConstants(&pc_block, sizeof(pc_block));
draw.Draw();
}
} // namespace Vulkan
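
A note on the texture-coordinate scheme above: vertices carry the raw glyph column index, and the vertex shader's multiply by PC.char_size (1/CHAR_COUNT, 1) normalizes it into the atlas. A minimal sketch of the same mapping, using only constants from this file:

// Illustrative helper mirroring PrintMultiLineText() plus the vertex shader's
// scale: returns the normalized u-range of a printable ASCII character in the
// single-row glyph atlas.
static bool GetGlyphURange(char c, float* u0, float* u1)
{
  if (c < CHAR_OFFSET || c >= CHAR_OFFSET + CHAR_COUNT)
    return false;  // no glyph stored for this character
  float column = static_cast<float>(c - CHAR_OFFSET);
  float char_size_x = 1.0f / static_cast<float>(CHAR_COUNT);  // PC.char_size.x
  *u0 = column * char_size_x;  // e.g. 'A' -> 33/95
  *u1 = (column + 1.0f) * char_size_x;
  return true;
}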

View File

@ -0,0 +1,39 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include "Common/CommonTypes.h"
#include "VideoBackends/Vulkan/Constants.h"
namespace Vulkan
{
class Texture2D;
class RasterFont
{
public:
RasterFont();
~RasterFont();
bool Initialize();
void PrintMultiLineText(VkRenderPass render_pass, const std::string& text, float start_x,
float start_y, u32 bbWidth, u32 bbHeight, u32 color);
private:
bool CreateTexture();
bool CreateShaders();
std::unique_ptr<Texture2D> m_texture;
VkShaderModule m_vertex_shader = VK_NULL_HANDLE;
VkShaderModule m_fragment_shader = VK_NULL_HANDLE;
};
} // namespace Vulkan

File diff suppressed because it is too large

View File

@ -0,0 +1,114 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <memory>
#include "VideoBackends/Vulkan/Constants.h"
#include "VideoCommon/RenderBase.h"
namespace Vulkan
{
class BoundingBox;
class FramebufferManager;
class SwapChain;
class StateTracker;
class Texture2D;
class RasterFont;
class Renderer : public ::Renderer
{
public:
Renderer();
~Renderer();
SwapChain* GetSwapChain() const { return m_swap_chain.get(); }
StateTracker* GetStateTracker() const { return m_state_tracker.get(); }
BoundingBox* GetBoundingBox() const { return m_bounding_box.get(); }
bool Initialize(FramebufferManager* framebuffer_mgr, void* window_handle, VkSurfaceKHR surface);
void RenderText(const std::string& pstr, int left, int top, u32 color) override;
u32 AccessEFB(EFBAccessType type, u32 x, u32 y, u32 poke_data) override;
void PokeEFB(EFBAccessType type, const EfbPokeData* points, size_t num_points) override;
u16 BBoxRead(int index) override;
void BBoxWrite(int index, u16 value) override;
int GetMaxTextureSize() override { return 16 * 1024; }
TargetRectangle ConvertEFBRectangle(const EFBRectangle& rc) override;
void SwapImpl(u32 xfb_addr, u32 fb_width, u32 fb_stride, u32 fb_height, const EFBRectangle& rc,
float gamma) override;
void ClearScreen(const EFBRectangle& rc, bool color_enable, bool alpha_enable, bool z_enable,
u32 color, u32 z) override;
void ReinterpretPixelData(unsigned int convtype) override;
bool SaveScreenshot(const std::string& filename, const TargetRectangle& rc) override
{
return false;
}
void ApplyState(bool bUseDstAlpha) override;
void ResetAPIState() override;
void RestoreAPIState() override;
void SetColorMask() override;
void SetBlendMode(bool force_update) override;
void SetScissorRect(const EFBRectangle& rc) override;
void SetGenerationMode() override;
void SetDepthMode() override;
void SetLogicOpMode() override;
void SetDitherMode() override;
void SetSamplerState(int stage, int texindex, bool custom_tex) override;
void SetInterlacingMode() override;
void SetViewport() override;
void ChangeSurface(void* new_surface_handle) override;
private:
bool CreateSemaphores();
void DestroySemaphores();
void BeginFrame();
void CheckForTargetResize(u32 fb_width, u32 fb_stride, u32 fb_height);
void CheckForSurfaceChange();
void CheckForConfigChanges();
void ResetSamplerStates();
void OnSwapChainResized();
void BindEFBToStateTracker();
void ResizeEFBTextures();
void ResizeSwapChain();
void RecompileShaders();
bool CompileShaders();
void DestroyShaders();
void DrawScreen(const TargetRectangle& src_rect, const Texture2D* src_tex);
void BlitScreen(VkRenderPass render_pass, const TargetRectangle& dst_rect,
const TargetRectangle& src_rect, const Texture2D* src_tex, bool linear_filter);
FramebufferManager* m_framebuffer_mgr = nullptr;
VkSemaphore m_image_available_semaphore = VK_NULL_HANDLE;
VkSemaphore m_rendering_finished_semaphore = VK_NULL_HANDLE;
std::unique_ptr<SwapChain> m_swap_chain;
std::unique_ptr<StateTracker> m_state_tracker;
std::unique_ptr<BoundingBox> m_bounding_box;
std::unique_ptr<RasterFont> m_raster_font;
// Keep a copy of sampler states to avoid cache lookups every draw
std::array<SamplerState, NUM_PIXEL_SHADER_SAMPLERS> m_sampler_states = {};
// Shaders used for clear/blit.
VkShaderModule m_clear_fragment_shader = VK_NULL_HANDLE;
VkShaderModule m_blit_fragment_shader = VK_NULL_HANDLE;
};
} // namespace Vulkan

View File

@ -0,0 +1,334 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <fstream>
#include <string>
// glslang includes
#include "GlslangToSpv.h"
#include "ShaderLang.h"
#include "disassemble.h"
#include "Common/FileUtil.h"
#include "Common/Logging/Log.h"
#include "Common/MsgHandler.h"
#include "Common/StringUtil.h"
#include "VideoBackends/Vulkan/ShaderCompiler.h"
#include "VideoCommon/VideoConfig.h"
namespace Vulkan
{
namespace ShaderCompiler
{
// Registers itself for cleanup via atexit
bool InitializeGlslang();
// Resource limits used when compiling shaders
static const TBuiltInResource* GetCompilerResourceLimits();
// Compile a shader to SPIR-V via glslang
static bool CompileShaderToSPV(SPIRVCodeVector* out_code, EShLanguage stage,
const char* stage_filename, const char* source_code,
size_t source_code_length, bool prepend_header);
// Regarding the UBO bind points, we subtract one from the binding index because
// the OpenGL backend requires UBO #0 for non-block uniforms (at least on NV).
// This allows us to share the same shaders but use bind point #0 in the Vulkan
// backend. None of the Vulkan-specific shaders use UBOs, instead they use push
// constants, so when/if the GL backend moves to uniform blocks completely this
// subtraction can be removed.
static const char SHADER_HEADER[] = R"(
// Target GLSL 4.5.
#version 450 core
#define ATTRIBUTE_LOCATION(x) layout(location = x)
#define FRAGMENT_OUTPUT_LOCATION(x) layout(location = x)
#define FRAGMENT_OUTPUT_LOCATION_INDEXED(x, y) layout(location = x, index = y)
#define UBO_BINDING(packing, x) layout(packing, set = 0, binding = (x - 1))
#define SAMPLER_BINDING(x) layout(set = 1, binding = x)
#define SSBO_BINDING(x) layout(set = 2, binding = x)
#define VARYING_LOCATION(x) layout(location = x)
#define FORCE_EARLY_Z layout(early_fragment_tests) in
// hlsl to glsl function translation
#define float2 vec2
#define float3 vec3
#define float4 vec4
#define uint2 uvec2
#define uint3 uvec3
#define uint4 uvec4
#define int2 ivec2
#define int3 ivec3
#define int4 ivec4
#define frac fract
#define lerp mix
// These were changed in Vulkan
#define gl_VertexID gl_VertexIndex
#define gl_InstanceID gl_InstanceIndex
)";
bool CompileShaderToSPV(SPIRVCodeVector* out_code, EShLanguage stage, const char* stage_filename,
const char* source_code, size_t source_code_length, bool prepend_header)
{
if (!InitializeGlslang())
return false;
std::unique_ptr<glslang::TShader> shader = std::make_unique<glslang::TShader>(stage);
std::unique_ptr<glslang::TProgram> program;
glslang::TShader::ForbidInclude includer;
EProfile profile = ECoreProfile;
EShMessages messages =
static_cast<EShMessages>(EShMsgDefault | EShMsgSpvRules | EShMsgVulkanRules);
int default_version = 450;
std::string full_source_code;
const char* pass_source_code = source_code;
int pass_source_code_length = static_cast<int>(source_code_length);
if (prepend_header)
{
full_source_code.reserve(sizeof(SHADER_HEADER) + source_code_length);
full_source_code.append(SHADER_HEADER, sizeof(SHADER_HEADER) - 1);
full_source_code.append(source_code, source_code_length);
pass_source_code = full_source_code.c_str();
pass_source_code_length = static_cast<int>(full_source_code.length());
}
shader->setStringsWithLengths(&pass_source_code, &pass_source_code_length, 1);
auto DumpBadShader = [&](const char* msg) {
static int counter = 0;
std::string filename = StringFromFormat(
"%sbad_%s_%04i.txt", File::GetUserPath(D_DUMP_IDX).c_str(), stage_filename, counter++);
std::ofstream stream;
OpenFStream(stream, filename, std::ios_base::out);
if (stream.good())
{
stream << full_source_code << std::endl;
stream << msg << std::endl;
stream << "Shader Info Log:" << std::endl;
stream << shader->getInfoLog() << std::endl;
stream << shader->getInfoDebugLog() << std::endl;
if (program)
{
stream << "Program Info Log:" << std::endl;
stream << program->getInfoLog() << std::endl;
stream << program->getInfoDebugLog() << std::endl;
}
}
PanicAlert("%s (written to %s)", msg, filename.c_str());
};
if (!shader->parse(GetCompilerResourceLimits(), default_version, profile, false, true, messages,
includer))
{
DumpBadShader("Failed to parse shader");
return false;
}
// Even though there's only a single shader, we still need to link it to generate SPV
program = std::make_unique<glslang::TProgram>();
program->addShader(shader.get());
if (!program->link(messages))
{
DumpBadShader("Failed to link program");
return false;
}
glslang::TIntermediate* intermediate = program->getIntermediate(stage);
if (!intermediate)
{
DumpBadShader("Failed to generate SPIR-V");
return false;
}
spv::SpvBuildLogger logger;
glslang::GlslangToSpv(*intermediate, *out_code, &logger);
// Write out messages.
// Temporary: the length checks below skip logs that contain only the known glslang
// warning "Warning, version 450 is not yet complete; most version-specific
// features are present, but some are missing." (108 characters).
if (strlen(shader->getInfoLog()) > 108)
WARN_LOG(VIDEO, "Shader info log: %s", shader->getInfoLog());
if (strlen(shader->getInfoDebugLog()) > 0)
WARN_LOG(VIDEO, "Shader debug info log: %s", shader->getInfoDebugLog());
if (strlen(program->getInfoLog()) > 25)
WARN_LOG(VIDEO, "Program info log: %s", program->getInfoLog());
if (strlen(program->getInfoDebugLog()) > 0)
WARN_LOG(VIDEO, "Program debug info log: %s", program->getInfoDebugLog());
std::string spv_messages = logger.getAllMessages();
if (!spv_messages.empty())
WARN_LOG(VIDEO, "SPIR-V conversion messages: %s", spv_messages.c_str());
// Dump source code of shaders out to file if enabled.
if (g_ActiveConfig.iLog & CONF_SAVESHADERS)
{
static int counter = 0;
std::string filename = StringFromFormat("%s%s_%04i.txt", File::GetUserPath(D_DUMP_IDX).c_str(),
stage_filename, counter++);
std::ofstream stream;
OpenFStream(stream, filename, std::ios_base::out);
if (stream.good())
{
stream << full_source_code << std::endl;
stream << "Shader Info Log:" << std::endl;
stream << shader->getInfoLog() << std::endl;
stream << shader->getInfoDebugLog() << std::endl;
stream << "Program Info Log:" << std::endl;
stream << program->getInfoLog() << std::endl;
stream << program->getInfoDebugLog() << std::endl;
stream << "SPIR-V conversion messages: " << std::endl;
stream << spv_messages;
stream << "SPIR-V:" << std::endl;
spv::Disassemble(stream, *out_code);
}
}
return true;
}
bool InitializeGlslang()
{
static bool glslang_initialized = false;
if (glslang_initialized)
return true;
if (!glslang::InitializeProcess())
{
PanicAlert("Failed to initialize glslang shader compiler");
return false;
}
std::atexit([]() { glslang::FinalizeProcess(); });
glslang_initialized = true;
return true;
}
const TBuiltInResource* GetCompilerResourceLimits()
{
static const TBuiltInResource limits = {/* .MaxLights = */ 32,
/* .MaxClipPlanes = */ 6,
/* .MaxTextureUnits = */ 32,
/* .MaxTextureCoords = */ 32,
/* .MaxVertexAttribs = */ 64,
/* .MaxVertexUniformComponents = */ 4096,
/* .MaxVaryingFloats = */ 64,
/* .MaxVertexTextureImageUnits = */ 32,
/* .MaxCombinedTextureImageUnits = */ 80,
/* .MaxTextureImageUnits = */ 32,
/* .MaxFragmentUniformComponents = */ 4096,
/* .MaxDrawBuffers = */ 32,
/* .MaxVertexUniformVectors = */ 128,
/* .MaxVaryingVectors = */ 8,
/* .MaxFragmentUniformVectors = */ 16,
/* .MaxVertexOutputVectors = */ 16,
/* .MaxFragmentInputVectors = */ 15,
/* .MinProgramTexelOffset = */ -8,
/* .MaxProgramTexelOffset = */ 7,
/* .MaxClipDistances = */ 8,
/* .MaxComputeWorkGroupCountX = */ 65535,
/* .MaxComputeWorkGroupCountY = */ 65535,
/* .MaxComputeWorkGroupCountZ = */ 65535,
/* .MaxComputeWorkGroupSizeX = */ 1024,
/* .MaxComputeWorkGroupSizeY = */ 1024,
/* .MaxComputeWorkGroupSizeZ = */ 64,
/* .MaxComputeUniformComponents = */ 1024,
/* .MaxComputeTextureImageUnits = */ 16,
/* .MaxComputeImageUniforms = */ 8,
/* .MaxComputeAtomicCounters = */ 8,
/* .MaxComputeAtomicCounterBuffers = */ 1,
/* .MaxVaryingComponents = */ 60,
/* .MaxVertexOutputComponents = */ 64,
/* .MaxGeometryInputComponents = */ 64,
/* .MaxGeometryOutputComponents = */ 128,
/* .MaxFragmentInputComponents = */ 128,
/* .MaxImageUnits = */ 8,
/* .MaxCombinedImageUnitsAndFragmentOutputs = */ 8,
/* .MaxCombinedShaderOutputResources = */ 8,
/* .MaxImageSamples = */ 0,
/* .MaxVertexImageUniforms = */ 0,
/* .MaxTessControlImageUniforms = */ 0,
/* .MaxTessEvaluationImageUniforms = */ 0,
/* .MaxGeometryImageUniforms = */ 0,
/* .MaxFragmentImageUniforms = */ 8,
/* .MaxCombinedImageUniforms = */ 8,
/* .MaxGeometryTextureImageUnits = */ 16,
/* .MaxGeometryOutputVertices = */ 256,
/* .MaxGeometryTotalOutputComponents = */ 1024,
/* .MaxGeometryUniformComponents = */ 1024,
/* .MaxGeometryVaryingComponents = */ 64,
/* .MaxTessControlInputComponents = */ 128,
/* .MaxTessControlOutputComponents = */ 128,
/* .MaxTessControlTextureImageUnits = */ 16,
/* .MaxTessControlUniformComponents = */ 1024,
/* .MaxTessControlTotalOutputComponents = */ 4096,
/* .MaxTessEvaluationInputComponents = */ 128,
/* .MaxTessEvaluationOutputComponents = */ 128,
/* .MaxTessEvaluationTextureImageUnits = */ 16,
/* .MaxTessEvaluationUniformComponents = */ 1024,
/* .MaxTessPatchComponents = */ 120,
/* .MaxPatchVertices = */ 32,
/* .MaxTessGenLevel = */ 64,
/* .MaxViewports = */ 16,
/* .MaxVertexAtomicCounters = */ 0,
/* .MaxTessControlAtomicCounters = */ 0,
/* .MaxTessEvaluationAtomicCounters = */ 0,
/* .MaxGeometryAtomicCounters = */ 0,
/* .MaxFragmentAtomicCounters = */ 8,
/* .MaxCombinedAtomicCounters = */ 8,
/* .MaxAtomicCounterBindings = */ 1,
/* .MaxVertexAtomicCounterBuffers = */ 0,
/* .MaxTessControlAtomicCounterBuffers = */ 0,
/* .MaxTessEvaluationAtomicCounterBuffers = */ 0,
/* .MaxGeometryAtomicCounterBuffers = */ 0,
/* .MaxFragmentAtomicCounterBuffers = */ 1,
/* .MaxCombinedAtomicCounterBuffers = */ 1,
/* .MaxAtomicCounterBufferSize = */ 16384,
/* .MaxTransformFeedbackBuffers = */ 4,
/* .MaxTransformFeedbackInterleavedComponents = */ 64,
/* .MaxCullDistances = */ 8,
/* .MaxCombinedClipAndCullDistances = */ 8,
/* .MaxSamples = */ 4,
/* .limits = */ {
/* .nonInductiveForLoops = */ 1,
/* .whileLoops = */ 1,
/* .doWhileLoops = */ 1,
/* .generalUniformIndexing = */ 1,
/* .generalAttributeMatrixVectorIndexing = */ 1,
/* .generalVaryingIndexing = */ 1,
/* .generalSamplerIndexing = */ 1,
/* .generalVariableIndexing = */ 1,
/* .generalConstantMatrixVectorIndexing = */ 1,
}};
return &limits;
}
bool CompileVertexShader(SPIRVCodeVector* out_code, const char* source_code,
size_t source_code_length, bool prepend_header)
{
return CompileShaderToSPV(out_code, EShLangVertex, "vs", source_code, source_code_length,
prepend_header);
}
bool CompileGeometryShader(SPIRVCodeVector* out_code, const char* source_code,
size_t source_code_length, bool prepend_header)
{
return CompileShaderToSPV(out_code, EShLangGeometry, "gs", source_code, source_code_length,
prepend_header);
}
bool CompileFragmentShader(SPIRVCodeVector* out_code, const char* source_code,
size_t source_code_length, bool prepend_header)
{
return CompileShaderToSPV(out_code, EShLangFragment, "ps", source_code, source_code_length,
prepend_header);
}
} // namespace ShaderCompiler
} // namespace Vulkan
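
For reference, a hedged sketch of how a caller can turn the resulting SPIR-V vector into a VkShaderModule (the backend's actual helper for this lives outside this file):

// Sketch: compile fragment-shader GLSL and wrap the SPIR-V in a shader module.
VkShaderModule CompileFragmentShaderToModule(VkDevice device, const std::string& source)
{
  ShaderCompiler::SPIRVCodeVector spv;
  if (!ShaderCompiler::CompileFragmentShader(&spv, source.c_str(), source.length()))
    return VK_NULL_HANDLE;

  VkShaderModuleCreateInfo info = {
      VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, nullptr, 0,
      spv.size() * sizeof(ShaderCompiler::SPIRVCodeType),  // codeSize is in bytes
      spv.data()};
  VkShaderModule module = VK_NULL_HANDLE;
  VkResult res = vkCreateShaderModule(device, &info, nullptr, &module);
  if (res != VK_SUCCESS)
  {
    LOG_VULKAN_ERROR(res, "vkCreateShaderModule failed: ");
    return VK_NULL_HANDLE;
  }
  return module;
}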

View File

@ -0,0 +1,32 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include "VideoBackends/Vulkan/Constants.h"
namespace Vulkan
{
namespace ShaderCompiler
{
// SPIR-V compiled code type
using SPIRVCodeType = u32;
using SPIRVCodeVector = std::vector<SPIRVCodeType>;
// Compile a vertex shader to SPIR-V.
bool CompileVertexShader(SPIRVCodeVector* out_code, const char* source_code,
size_t source_code_length, bool prepend_header = true);
// Compile a geometry shader to SPIR-V.
bool CompileGeometryShader(SPIRVCodeVector* out_code, const char* source_code,
size_t source_code_length, bool prepend_header = true);
// Compile a fragment shader to SPIR-V.
bool CompileFragmentShader(SPIRVCodeVector* out_code, const char* source_code,
size_t source_code_length, bool prepend_header = true);
} // namespace ShaderCompiler
} // namespace Vulkan

View File

@ -0,0 +1,198 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <algorithm>
#include <cstring>
#include "Common/Assert.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/StagingBuffer.h"
#include "VideoBackends/Vulkan/Util.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
namespace Vulkan
{
StagingBuffer::StagingBuffer(STAGING_BUFFER_TYPE type, VkBuffer buffer, VkDeviceMemory memory,
VkDeviceSize size, bool coherent)
: m_type(type), m_buffer(buffer), m_memory(memory), m_size(size), m_coherent(coherent)
{
}
StagingBuffer::~StagingBuffer()
{
// Unmap before destroying
if (m_map_pointer)
Unmap();
g_command_buffer_mgr->DeferResourceDestruction(m_memory);
g_command_buffer_mgr->DeferResourceDestruction(m_buffer);
}
bool StagingBuffer::Map(VkDeviceSize offset, VkDeviceSize size)
{
m_map_offset = offset;
if (size == VK_WHOLE_SIZE)
m_map_size = m_size - offset;
else
m_map_size = size;
_assert_(!m_map_pointer);
_assert_(m_map_offset + m_map_size <= m_size);
void* map_pointer;
VkResult res = vkMapMemory(g_vulkan_context->GetDevice(), m_memory, m_map_offset, m_map_size, 0,
&map_pointer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkMapMemory failed: ");
return false;
}
m_map_pointer = reinterpret_cast<char*>(map_pointer);
return true;
}
void StagingBuffer::Unmap()
{
_assert_(m_map_pointer);
vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
m_map_pointer = nullptr;
m_map_offset = 0;
m_map_size = 0;
}
void StagingBuffer::FlushCPUCache(VkDeviceSize offset, VkDeviceSize size)
{
_assert_(offset >= m_map_offset);
if (m_coherent)
return;
VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory,
offset - m_map_offset, size};
vkFlushMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
}
void StagingBuffer::InvalidateGPUCache(VkCommandBuffer command_buffer,
VkAccessFlagBits dest_access_flags,
VkPipelineStageFlagBits dest_pipeline_stage,
VkDeviceSize offset, VkDeviceSize size)
{
_assert_((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
Util::BufferMemoryBarrier(command_buffer, m_buffer, VK_ACCESS_HOST_WRITE_BIT, dest_access_flags,
offset, size, VK_PIPELINE_STAGE_HOST_BIT, dest_pipeline_stage);
}
void StagingBuffer::PrepareForGPUWrite(VkCommandBuffer command_buffer,
VkAccessFlagBits dst_access_flags,
VkPipelineStageFlagBits dst_pipeline_stage,
VkDeviceSize offset, VkDeviceSize size)
{
_assert_((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
Util::BufferMemoryBarrier(command_buffer, m_buffer, 0, dst_access_flags, offset, size,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, dst_pipeline_stage);
}
void StagingBuffer::FlushGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBits src_access_flags,
VkPipelineStageFlagBits src_pipeline_stage, VkDeviceSize offset,
VkDeviceSize size)
{
_assert_((offset + size) <= m_size || (offset < m_size && size == VK_WHOLE_SIZE));
Util::BufferMemoryBarrier(command_buffer, m_buffer, src_access_flags, VK_ACCESS_HOST_READ_BIT,
offset, size, src_pipeline_stage, VK_PIPELINE_STAGE_HOST_BIT);
}
void StagingBuffer::InvalidateCPUCache(VkDeviceSize offset, VkDeviceSize size)
{
_assert_(offset >= m_map_offset);
if (m_coherent)
return;
VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory,
offset - m_map_offset, size};
vkInvalidateMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
}
void StagingBuffer::Read(VkDeviceSize offset, void* data, size_t size, bool invalidate_caches)
{
_assert_((offset + size) <= m_size);
_assert_(offset >= m_map_offset && (offset - m_map_offset) + size <= m_map_size);
if (invalidate_caches)
InvalidateCPUCache(offset, size);
memcpy(data, m_map_pointer + (offset - m_map_offset), size);
}
void StagingBuffer::Write(VkDeviceSize offset, const void* data, size_t size,
bool invalidate_caches)
{
_assert_((offset + size) <= m_size);
_assert_(offset >= m_map_offset && (offset - m_map_offset) + size <= m_map_size);
memcpy(m_map_pointer + (offset - m_map_offset), data, size);
if (invalidate_caches)
FlushCPUCache(offset, size);
}
std::unique_ptr<Vulkan::StagingBuffer>
StagingBuffer::Create(STAGING_BUFFER_TYPE type, VkDeviceSize size, VkBufferUsageFlags usage)
{
VkBufferCreateInfo buffer_create_info = {
VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
0, // VkBufferCreateFlags flags
size, // VkDeviceSize size
usage, // VkBufferUsageFlags usage
VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode
0, // uint32_t queueFamilyIndexCount
nullptr // const uint32_t* pQueueFamilyIndices
};
VkBuffer buffer;
VkResult res =
vkCreateBuffer(g_vulkan_context->GetDevice(), &buffer_create_info, nullptr, &buffer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: ");
return nullptr;
}
VkMemoryRequirements requirements;
vkGetBufferMemoryRequirements(g_vulkan_context->GetDevice(), buffer, &requirements);
bool is_coherent;
u32 type_index;
if (type == STAGING_BUFFER_TYPE_UPLOAD)
type_index = g_vulkan_context->GetUploadMemoryType(requirements.memoryTypeBits, &is_coherent);
else
type_index = g_vulkan_context->GetReadbackMemoryType(requirements.memoryTypeBits, &is_coherent);
VkMemoryAllocateInfo memory_allocate_info = {
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
requirements.size, // VkDeviceSize allocationSize
type_index // uint32_t memoryTypeIndex
};
VkDeviceMemory memory;
res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, &memory);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
return nullptr;
}
res = vkBindBufferMemory(g_vulkan_context->GetDevice(), buffer, memory, 0);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkBindBufferMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr);
return nullptr;
}
return std::make_unique<Vulkan::StagingBuffer>(type, buffer, memory, size, is_coherent);
}
} // namespace Vulkan
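
Taken together, the barrier helpers above implement the readback protocol spelled out in the header. A minimal sketch of a full GPU-to-CPU readback, assuming the staging buffer was created with VK_BUFFER_USAGE_TRANSFER_DST_BIT and left persistently mapped (as PerfQuery does):

// Sketch: copy a GPU buffer into a readback staging buffer and read it back.
void ReadbackExample(VkCommandBuffer cmd, VkBuffer src_buffer, StagingBuffer* staging,
                     VkFence fence)
{
  // Readback part 0: transition the staging buffer for transfer writes.
  staging->PrepareForGPUWrite(cmd, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);

  // GPU-side copy into the staging buffer.
  VkBufferCopy region = {0, 0, staging->GetSize()};
  vkCmdCopyBuffer(cmd, src_buffer, staging->GetBuffer(), 1, &region);

  // Readback part 1: make the transfer writes visible to the host.
  staging->FlushGPUCache(cmd, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);

  // Submit, then wait for the GPU. Readback part 2 happens inside Read(),
  // which invalidates the CPU cache for non-coherent memory before the memcpy.
  g_command_buffer_mgr->WaitForFence(fence);
  u32 value;
  staging->Read(0, &value, sizeof(value), true);
}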

View File

@ -0,0 +1,71 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include "VideoBackends/Vulkan/Constants.h"
namespace Vulkan
{
class StagingBuffer
{
public:
StagingBuffer(STAGING_BUFFER_TYPE type, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize size,
bool coherent);
~StagingBuffer();
STAGING_BUFFER_TYPE GetType() const { return m_type; }
VkDeviceSize GetSize() const { return m_size; }
VkBuffer GetBuffer() const { return m_buffer; }
bool IsMapped() const { return m_map_pointer != nullptr; }
const char* GetMapPointer() const { return m_map_pointer; }
char* GetMapPointer() { return m_map_pointer; }
VkDeviceSize GetMapOffset() const { return m_map_offset; }
VkDeviceSize GetMapSize() const { return m_map_size; }
bool Map(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE);
void Unmap();
// Upload part 1: Prepare for device read from the CPU side
void FlushCPUCache(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE);
// Upload part 2: Prepare for device read from the GPU side
void InvalidateGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBits dst_access_flags,
VkPipelineStageFlagBits dst_pipeline_stage, VkDeviceSize offset = 0,
VkDeviceSize size = VK_WHOLE_SIZE);
// Readback part 0: Prepare for GPU usage (if necessary)
void PrepareForGPUWrite(VkCommandBuffer command_buffer, VkAccessFlagBits dst_access_flags,
VkPipelineStageFlagBits dst_pipeline_stage, VkDeviceSize offset = 0,
VkDeviceSize size = VK_WHOLE_SIZE);
// Readback part 1: Prepare for host readback from the GPU side
void FlushGPUCache(VkCommandBuffer command_buffer, VkAccessFlagBits src_access_flags,
VkPipelineStageFlagBits src_pipeline_stage, VkDeviceSize offset = 0,
VkDeviceSize size = VK_WHOLE_SIZE);
// Readback part 2: Prepare for host readback from the CPU side
void InvalidateCPUCache(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE);
// offset is from the start of the buffer, not from the map offset
void Read(VkDeviceSize offset, void* data, size_t size, bool invalidate_caches = true);
void Write(VkDeviceSize offset, const void* data, size_t size, bool invalidate_caches = true);
// Allocates a host-visible staging buffer of the given type, size, and usage.
static std::unique_ptr<StagingBuffer> Create(STAGING_BUFFER_TYPE type, VkDeviceSize size,
VkBufferUsageFlags usage);
protected:
STAGING_BUFFER_TYPE m_type;
VkBuffer m_buffer;
VkDeviceMemory m_memory;
VkDeviceSize m_size;
bool m_coherent;
char* m_map_pointer = nullptr;
VkDeviceSize m_map_offset = 0;
VkDeviceSize m_map_size = 0;
};
} // namespace Vulkan
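
The upload direction mirrors the readback flow: parts 1 and 2 above correspond to a CPU write followed by a host-write to transfer-read barrier. A minimal sketch, again assuming a mapped upload-type buffer:

// Sketch: fill an upload staging buffer on the CPU and hand it to the GPU.
void UploadExample(VkCommandBuffer cmd, StagingBuffer* staging, const void* data, size_t size)
{
  // Upload part 1: CPU write; Write() flushes the CPU cache for non-coherent
  // memory when the last argument is true.
  staging->Write(0, data, size, true);

  // Upload part 2: host-write -> transfer-read barrier on the GPU timeline.
  staging->InvalidateGPUCache(cmd, VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);

  // The buffer can now be used as a transfer source, e.g. with vkCmdCopyBuffer().
}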

View File

@ -0,0 +1,536 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <algorithm>
#include <cstring>
#include "Common/Assert.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/StagingTexture2D.h"
#include "VideoBackends/Vulkan/Util.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
namespace Vulkan
{
StagingTexture2D::StagingTexture2D(STAGING_BUFFER_TYPE type, u32 width, u32 height, VkFormat format,
u32 stride)
: m_type(type), m_width(width), m_height(height), m_format(format),
m_texel_size(Util::GetTexelSize(format)), m_row_stride(stride)
{
}
StagingTexture2D::~StagingTexture2D()
{
_assert_(!m_map_pointer);
}
void StagingTexture2D::ReadTexel(u32 x, u32 y, void* data, size_t data_size) const
{
_assert_(data_size >= m_texel_size);
VkDeviceSize offset = y * m_row_stride + x * m_texel_size;
VkDeviceSize map_offset = offset - m_map_offset;
_assert_(offset >= m_map_offset && (map_offset + m_texel_size) <= m_map_size);
const char* ptr = m_map_pointer + map_offset;
memcpy(data, ptr, data_size);
}
void StagingTexture2D::WriteTexel(u32 x, u32 y, const void* data, size_t data_size)
{
_assert_(data_size >= m_texel_size);
VkDeviceSize offset = y * m_row_stride + x * m_texel_size;
VkDeviceSize map_offset = offset - m_map_offset;
_assert_(offset >= m_map_offset && (map_offset + m_texel_size) <= m_map_size);
char* ptr = m_map_pointer + map_offset;
memcpy(ptr, data, data_size);
}
void StagingTexture2D::ReadTexels(u32 x, u32 y, u32 width, u32 height, void* data,
u32 data_stride) const
{
const char* src_ptr = GetRowPointer(y);
// Optimal path: same dimensions, same stride.
_assert_((x + width) <= m_width && (y + height) <= m_height);
if (x == 0 && width == m_width && m_row_stride == data_stride)
{
memcpy(data, src_ptr, m_row_stride * height);
return;
}
u32 copy_size = std::min(width * m_texel_size, data_stride);
char* dst_ptr = reinterpret_cast<char*>(data);
for (u32 row = 0; row < height; row++)
{
memcpy(dst_ptr, src_ptr + (x * m_texel_size), copy_size);
src_ptr += m_row_stride;
dst_ptr += data_stride;
}
}
void StagingTexture2D::WriteTexels(u32 x, u32 y, u32 width, u32 height, const void* data,
u32 data_stride)
{
char* dst_ptr = GetRowPointer(y);
// Optimal path: same dimensions, same stride.
_assert_((x + width) <= m_width && (y + height) <= m_height);
if (x == 0 && width == m_width && m_row_stride == data_stride)
{
memcpy(dst_ptr, data, m_row_stride * height);
return;
}
u32 copy_size = std::min(width * m_texel_size, data_stride);
const char* src_ptr = reinterpret_cast<const char*>(data);
for (u32 row = 0; row < height; row++)
{
memcpy(dst_ptr + (x * m_texel_size), src_ptr, copy_size);
dst_ptr += m_row_stride;
src_ptr += data_stride;
}
}
std::unique_ptr<StagingTexture2D> StagingTexture2D::Create(STAGING_BUFFER_TYPE type, u32 width,
u32 height, VkFormat format)
{
// TODO: Using a buffer here as opposed to a linear texture is faster on AMD.
// NVIDIA also seems faster with buffers over textures.
#if 0
// Check for support for this format as a linear texture.
// Some drivers don't support this (e.g. adreno).
VkImageFormatProperties properties;
VkResult res = vkGetPhysicalDeviceImageFormatProperties(
g_vulkan_context->GetPhysicalDevice(), format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_LINEAR,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, 0, &properties);
if (res == VK_SUCCESS && width <= properties.maxExtent.width &&
height <= properties.maxExtent.height)
{
return StagingTexture2DLinear::Create(type, width, height, format);
}
#endif
// Fall back to a buffer copy.
return StagingTexture2DBuffer::Create(type, width, height, format);
}
StagingTexture2DLinear::StagingTexture2DLinear(STAGING_BUFFER_TYPE type, u32 width, u32 height,
VkFormat format, u32 stride, VkImage image,
VkDeviceMemory memory, VkDeviceSize size,
bool coherent)
: StagingTexture2D(type, width, height, format, stride), m_image(image), m_memory(memory),
m_size(size), m_layout(VK_IMAGE_LAYOUT_PREINITIALIZED), m_coherent(coherent)
{
}
StagingTexture2DLinear::~StagingTexture2DLinear()
{
if (m_map_pointer)
Unmap();
g_command_buffer_mgr->DeferResourceDestruction(m_memory);
g_command_buffer_mgr->DeferResourceDestruction(m_image);
}
void StagingTexture2DLinear::CopyFromImage(VkCommandBuffer command_buffer, VkImage image,
VkImageAspectFlags src_aspect, u32 x, u32 y, u32 width,
u32 height, u32 level, u32 layer)
{
// Prepare the buffer for copying.
// We don't care about the existing contents, so set to UNDEFINED.
VkImageMemoryBarrier before_transfer_barrier = {
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
0, // VkAccessFlags srcAccessMask
VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
m_image, // VkImage image
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // VkImageSubresourceRange subresourceRange
};
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1,
&before_transfer_barrier);
// Issue the image copy, gpu -> host.
VkImageCopy copy_region = {
{src_aspect, level, layer, 1}, // VkImageSubresourceLayers srcSubresource
{static_cast<s32>(x), static_cast<s32>(y), 0}, // VkOffset3D srcOffset
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1}, // VkImageSubresourceLayers dstSubresource
{0, 0, 0}, // VkOffset3D dstOffset
{width, height, 1} // VkExtent3D extent
};
vkCmdCopyImage(command_buffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region);
// Ensure writes are visible to the host.
VkImageMemoryBarrier visible_barrier = {
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask
VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout
VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout newLayout
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
m_image, // VkImage image
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // VkImageSubresourceRange subresourceRange
};
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
0, 0, nullptr, 0, nullptr, 1, &visible_barrier);
m_layout = VK_IMAGE_LAYOUT_GENERAL;
// Invalidate memory range if currently mapped.
if (m_map_pointer && !m_coherent)
{
VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory,
m_map_offset, m_map_size};
vkInvalidateMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
}
}
void StagingTexture2DLinear::CopyToImage(VkCommandBuffer command_buffer, VkImage image,
VkImageAspectFlags dst_aspect, u32 x, u32 y, u32 width,
u32 height, u32 level, u32 layer)
{
// Flush memory range if currently mapped.
if (m_map_pointer && !m_coherent)
{
VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory,
m_map_offset, m_map_size};
vkFlushMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
}
// Ensure any writes to the image are visible to the GPU.
VkImageMemoryBarrier barrier = {
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask
VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask
m_layout, // VkImageLayout oldLayout
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
m_image, // VkImage image
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // VkImageSubresourceRange subresourceRange
};
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
0, 0, nullptr, 0, nullptr, 1, &barrier);
m_layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
// Issue the image copy, host -> gpu.
VkImageCopy copy_region = {
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1}, // VkImageSubresourceLayers srcSubresource
{0, 0, 0}, // VkOffset3D srcOffset
{dst_aspect, level, layer, 1}, // VkImageSubresourceLayers dstSubresource
{static_cast<s32>(x), static_cast<s32>(y), 0}, // VkOffset3D dstOffset
{width, height, 1} // VkExtent3D extent
};
vkCmdCopyImage(command_buffer, m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region);
}
bool StagingTexture2DLinear::Map(VkDeviceSize offset /* = 0 */,
VkDeviceSize size /* = VK_WHOLE_SIZE */)
{
m_map_offset = offset;
if (size == VK_WHOLE_SIZE)
m_map_size = m_size - offset;
else
m_map_size = size;
_assert_(!m_map_pointer);
_assert_(m_map_offset + m_map_size <= m_size);
void* map_pointer;
VkResult res = vkMapMemory(g_vulkan_context->GetDevice(), m_memory, m_map_offset, m_map_size, 0,
&map_pointer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkMapMemory failed: ");
return false;
}
m_map_pointer = reinterpret_cast<char*>(map_pointer);
return true;
}
void StagingTexture2DLinear::Unmap()
{
_assert_(m_map_pointer);
vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
m_map_pointer = nullptr;
m_map_offset = 0;
m_map_size = 0;
}
std::unique_ptr<StagingTexture2D>
StagingTexture2DLinear::Create(STAGING_BUFFER_TYPE type, u32 width, u32 height, VkFormat format)
{
VkImageUsageFlags usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
VkImageCreateInfo create_info = {
VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
0, // VkImageCreateFlags flags
VK_IMAGE_TYPE_2D, // VkImageType imageType
format, // VkFormat format
{width, height, 1}, // VkExtent3D extent
1, // uint32_t mipLevels
1, // uint32_t arrayLayers
VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples
VK_IMAGE_TILING_LINEAR, // VkImageTiling tiling
usage, // VkImageUsageFlags usage
VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode
0, // uint32_t queueFamilyIndexCount
nullptr, // const uint32_t* pQueueFamilyIndices
VK_IMAGE_LAYOUT_PREINITIALIZED // VkImageLayout initialLayout
};
VkImage image;
VkResult res = vkCreateImage(g_vulkan_context->GetDevice(), &create_info, nullptr, &image);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImage failed: ");
return nullptr;
}
VkMemoryRequirements memory_requirements;
vkGetImageMemoryRequirements(g_vulkan_context->GetDevice(), image, &memory_requirements);
bool is_coherent;
u32 memory_type_index;
if (type == STAGING_BUFFER_TYPE_READBACK)
{
memory_type_index =
g_vulkan_context->GetReadbackMemoryType(memory_requirements.memoryTypeBits, &is_coherent);
}
else
{
memory_type_index =
g_vulkan_context->GetUploadMemoryType(memory_requirements.memoryTypeBits, &is_coherent);
}
VkMemoryAllocateInfo memory_allocate_info = {
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
memory_requirements.size, // VkDeviceSize allocationSize
memory_type_index // uint32_t memoryTypeIndex
};
VkDeviceMemory memory;
res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, &memory);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: ");
vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr);
return nullptr;
}
res = vkBindImageMemory(g_vulkan_context->GetDevice(), image, memory, 0);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkBindImageMemory failed: ");
vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr);
return nullptr;
}
// Assume tight packing. Strictly, the row pitch of a linear image should be queried via
// vkGetImageSubresourceLayout(); see the sketch below.
u32 stride = width * Util::GetTexelSize(format);
return std::make_unique<StagingTexture2DLinear>(type, width, height, format, stride, image,
memory, memory_requirements.size, is_coherent);
}
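// Sketch of the stricter alternative to the tight-packing assumption above: query the
// driver's actual row pitch for the linear image and pass that as the stride instead.
// Not wired in here; shown only under the assumption that the pitch may differ from
// width * texel_size on some drivers.
//
//   VkImageSubresource subresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0};
//   VkSubresourceLayout layout;
//   vkGetImageSubresourceLayout(g_vulkan_context->GetDevice(), image, &subresource, &layout);
//   u32 stride = static_cast<u32>(layout.rowPitch);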
StagingTexture2DBuffer::StagingTexture2DBuffer(STAGING_BUFFER_TYPE type, u32 width, u32 height,
VkFormat format, u32 stride, VkBuffer buffer,
VkDeviceMemory memory, VkDeviceSize size,
bool coherent)
: StagingTexture2D(type, width, height, format, stride), m_buffer(buffer), m_memory(memory),
m_size(size), m_coherent(coherent)
{
}
StagingTexture2DBuffer::~StagingTexture2DBuffer()
{
if (m_map_pointer)
Unmap();
g_command_buffer_mgr->DeferResourceDestruction(m_memory);
g_command_buffer_mgr->DeferResourceDestruction(m_buffer);
}
void StagingTexture2DBuffer::CopyFromImage(VkCommandBuffer command_buffer, VkImage image,
VkImageAspectFlags src_aspect, u32 x, u32 y, u32 width,
u32 height, u32 level, u32 layer)
{
// Issue the image->buffer copy.
VkBufferImageCopy image_copy = {
0, // VkDeviceSize bufferOffset
m_width, // uint32_t bufferRowLength
0, // uint32_t bufferImageHeight
{src_aspect, level, layer, 1}, // VkImageSubresourceLayers imageSubresource
{static_cast<s32>(x), static_cast<s32>(y), 0}, // VkOffset3D imageOffset
{width, height, 1} // VkExtent3D imageExtent
};
vkCmdCopyImageToBuffer(command_buffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_buffer, 1,
&image_copy);
// Ensure the write has completed.
VkDeviceSize copy_size = m_row_stride * height;
Util::BufferMemoryBarrier(command_buffer, m_buffer, VK_ACCESS_TRANSFER_WRITE_BIT,
VK_ACCESS_HOST_READ_BIT, 0, copy_size, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_HOST_BIT);
// If we're still mapped, invalidate the mapped range
if (m_map_pointer && !m_coherent)
{
VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory,
m_map_offset, m_map_size};
vkInvalidateMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
}
}
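// Addressing note for the copy above: with bufferOffset == 0 and bufferRowLength == m_width,
// a texel at region-relative coordinates (i, j) lands at buffer byte
//   j * m_row_stride + i * m_texel_size
// which is exactly the offset ReadTexel()/WriteTexel() compute. The coordinates therefore
// line up with image coordinates only for full-surface copies (x == 0, y == 0).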
void StagingTexture2DBuffer::CopyToImage(VkCommandBuffer command_buffer, VkImage image,
VkImageAspectFlags dst_aspect, u32 x, u32 y, u32 width,
u32 height, u32 level, u32 layer)
{
// If we're still mapped, flush the mapped range
if (m_map_pointer && !m_coherent)
{
VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory,
m_map_offset, m_map_size};
vkFlushMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
}
// Ensure writes are visible to GPU.
VkDeviceSize copy_size = m_row_stride * height;
Util::BufferMemoryBarrier(command_buffer, m_buffer, VK_ACCESS_HOST_WRITE_BIT,
VK_ACCESS_TRANSFER_READ_BIT, 0, copy_size, VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT);
// Issue the buffer->image copy
VkBufferImageCopy image_copy = {
0, // VkDeviceSize bufferOffset
m_width, // uint32_t bufferRowLength
0, // uint32_t bufferImageHeight
{dst_aspect, level, layer, 1}, // VkImageSubresourceLayers imageSubresource
{static_cast<s32>(x), static_cast<s32>(y), 0}, // VkOffset3D imageOffset
{width, height, 1} // VkExtent3D imageExtent
};
vkCmdCopyBufferToImage(command_buffer, m_buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
&image_copy);
}
bool StagingTexture2DBuffer::Map(VkDeviceSize offset /* = 0 */,
VkDeviceSize size /* = VK_WHOLE_SIZE */)
{
m_map_offset = offset;
if (size == VK_WHOLE_SIZE)
m_map_size = m_size - offset;
else
m_map_size = size;
_assert_(!m_map_pointer);
_assert_(m_map_offset + m_map_size <= m_size);
void* map_pointer;
VkResult res = vkMapMemory(g_vulkan_context->GetDevice(), m_memory, m_map_offset, m_map_size, 0,
&map_pointer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkMapMemory failed: ");
return false;
}
m_map_pointer = reinterpret_cast<char*>(map_pointer);
return true;
}
void StagingTexture2DBuffer::Unmap()
{
_assert_(m_map_pointer);
vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
m_map_pointer = nullptr;
m_map_offset = 0;
m_map_size = 0;
}
std::unique_ptr<StagingTexture2D>
StagingTexture2DBuffer::Create(STAGING_BUFFER_TYPE type, u32 width, u32 height, VkFormat format)
{
// Assume tight packing.
u32 row_stride = Util::GetTexelSize(format) * width;
u32 buffer_size = row_stride * height;
VkBufferUsageFlags usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
VkBufferCreateInfo buffer_create_info = {
VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
0, // VkBufferCreateFlags flags
buffer_size, // VkDeviceSize size
usage, // VkBufferUsageFlags usage
VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode
0, // uint32_t queueFamilyIndexCount
nullptr // const uint32_t* pQueueFamilyIndices
};
VkBuffer buffer;
VkResult res =
vkCreateBuffer(g_vulkan_context->GetDevice(), &buffer_create_info, nullptr, &buffer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: ");
return nullptr;
}
VkMemoryRequirements memory_requirements;
vkGetBufferMemoryRequirements(g_vulkan_context->GetDevice(), buffer, &memory_requirements);
bool is_coherent;
u32 memory_type_index;
if (type == STAGING_BUFFER_TYPE_READBACK)
{
memory_type_index =
g_vulkan_context->GetReadbackMemoryType(memory_requirements.memoryTypeBits, &is_coherent);
}
else
{
memory_type_index =
g_vulkan_context->GetUploadMemoryType(memory_requirements.memoryTypeBits, &is_coherent);
}
VkMemoryAllocateInfo memory_allocate_info = {
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
memory_requirements.size, // VkDeviceSize allocationSize
memory_type_index // uint32_t memoryTypeIndex
};
VkDeviceMemory memory;
res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, &memory);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
return nullptr;
}
res = vkBindBufferMemory(g_vulkan_context->GetDevice(), buffer, memory, 0);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkBindBufferMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr);
return nullptr;
}
return std::make_unique<StagingTexture2DBuffer>(type, width, height, format, row_stride, buffer,
memory, buffer_size, is_coherent);
}
} // namespace Vulkan

View File

@ -0,0 +1,125 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include "VideoBackends/Vulkan/Constants.h"
namespace Vulkan
{
class StagingTexture2D
{
public:
StagingTexture2D(STAGING_BUFFER_TYPE type, u32 width, u32 height, VkFormat format, u32 stride);
virtual ~StagingTexture2D();
STAGING_BUFFER_TYPE GetType() const { return m_type; }
u32 GetWidth() const { return m_width; }
u32 GetHeight() const { return m_height; }
VkFormat GetFormat() const { return m_format; }
u32 GetRowStride() const { return m_row_stride; }
u32 GetTexelSize() const { return m_texel_size; }
bool IsMapped() const { return m_map_pointer != nullptr; }
const char* GetMapPointer() const { return m_map_pointer; }
char* GetMapPointer() { return m_map_pointer; }
VkDeviceSize GetMapOffset() const { return m_map_offset; }
VkDeviceSize GetMapSize() const { return m_map_size; }
const char* GetRowPointer(u32 row) const { return m_map_pointer + row * m_row_stride; }
char* GetRowPointer(u32 row) { return m_map_pointer + row * m_row_stride; }
// Requires Map() to be called first.
void ReadTexel(u32 x, u32 y, void* data, size_t data_size) const;
void WriteTexel(u32 x, u32 y, const void* data, size_t data_size);
void ReadTexels(u32 x, u32 y, u32 width, u32 height, void* data, u32 data_stride) const;
void WriteTexels(u32 x, u32 y, u32 width, u32 height, const void* data, u32 data_stride);
// Assumes that image is in TRANSFER_SRC layout.
// Results are not ready until command_buffer has been executed.
virtual void CopyFromImage(VkCommandBuffer command_buffer, VkImage image,
VkImageAspectFlags src_aspect, u32 x, u32 y, u32 width, u32 height,
u32 level, u32 layer) = 0;
// Assumes that image is in TRANSFER_DST layout.
// Buffer is not safe for re-use until after command_buffer has been executed.
virtual void CopyToImage(VkCommandBuffer command_buffer, VkImage image,
VkImageAspectFlags dst_aspect, u32 x, u32 y, u32 width, u32 height,
u32 level, u32 layer) = 0;
virtual bool Map(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE) = 0;
virtual void Unmap() = 0;
// Creates the variant of staging texture best suited to image copies on this driver.
static std::unique_ptr<StagingTexture2D> Create(STAGING_BUFFER_TYPE type, u32 width, u32 height,
VkFormat format);
protected:
STAGING_BUFFER_TYPE m_type;
u32 m_width;
u32 m_height;
VkFormat m_format;
u32 m_texel_size;
u32 m_row_stride;
char* m_map_pointer = nullptr;
VkDeviceSize m_map_offset = 0;
VkDeviceSize m_map_size = 0;
};
class StagingTexture2DLinear : public StagingTexture2D
{
public:
StagingTexture2DLinear(STAGING_BUFFER_TYPE type, u32 width, u32 height, VkFormat format,
u32 stride, VkImage image, VkDeviceMemory memory, VkDeviceSize size,
bool coherent);
~StagingTexture2DLinear();
void CopyFromImage(VkCommandBuffer command_buffer, VkImage image, VkImageAspectFlags src_aspect,
u32 x, u32 y, u32 width, u32 height, u32 level, u32 layer) override;
void CopyToImage(VkCommandBuffer command_buffer, VkImage image, VkImageAspectFlags dst_aspect,
u32 x, u32 y, u32 width, u32 height, u32 level, u32 layer) override;
bool Map(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE) override;
void Unmap() override;
static std::unique_ptr<StagingTexture2D> Create(STAGING_BUFFER_TYPE type, u32 width, u32 height,
VkFormat format);
private:
VkImage m_image;
VkDeviceMemory m_memory;
VkDeviceSize m_size;
VkImageLayout m_layout;
bool m_coherent;
};
class StagingTexture2DBuffer : public StagingTexture2D
{
public:
StagingTexture2DBuffer(STAGING_BUFFER_TYPE type, u32 width, u32 height, VkFormat format,
u32 stride, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize size,
bool coherent);
~StagingTexture2DBuffer();
void CopyFromImage(VkCommandBuffer command_buffer, VkImage image, VkImageAspectFlags src_aspect,
u32 x, u32 y, u32 width, u32 height, u32 level, u32 layer) override;
void CopyToImage(VkCommandBuffer command_buffer, VkImage image, VkImageAspectFlags dst_aspect,
u32 x, u32 y, u32 width, u32 height, u32 level, u32 layer) override;
bool Map(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE) override;
void Unmap() override;
static std::unique_ptr<StagingTexture2D> Create(STAGING_BUFFER_TYPE type, u32 width, u32 height,
VkFormat format);
private:
VkBuffer m_buffer;
VkDeviceMemory m_memory;
VkDeviceSize m_size;
bool m_coherent;
};
} // namespace Vulkan

View File

@ -0,0 +1,844 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include "Common/Assert.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/Constants.h"
#include "VideoBackends/Vulkan/FramebufferManager.h"
#include "VideoBackends/Vulkan/ObjectCache.h"
#include "VideoBackends/Vulkan/StateTracker.h"
#include "VideoBackends/Vulkan/StreamBuffer.h"
#include "VideoBackends/Vulkan/Util.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
#include "VideoCommon/GeometryShaderManager.h"
#include "VideoCommon/PixelShaderManager.h"
#include "VideoCommon/Statistics.h"
#include "VideoCommon/VertexShaderManager.h"
#include "VideoCommon/VideoConfig.h"
namespace Vulkan
{
StateTracker::StateTracker()
{
// Set some sensible defaults
m_pipeline_state.pipeline_layout = g_object_cache->GetStandardPipelineLayout();
m_pipeline_state.rasterization_state.cull_mode = VK_CULL_MODE_NONE;
m_pipeline_state.rasterization_state.per_sample_shading = VK_FALSE;
m_pipeline_state.rasterization_state.depth_clamp = VK_FALSE;
m_pipeline_state.depth_stencil_state.test_enable = VK_TRUE;
m_pipeline_state.depth_stencil_state.write_enable = VK_TRUE;
m_pipeline_state.depth_stencil_state.compare_op = VK_COMPARE_OP_LESS;
m_pipeline_state.blend_state.blend_enable = VK_FALSE;
m_pipeline_state.blend_state.blend_op = VK_BLEND_OP_ADD;
m_pipeline_state.blend_state.src_blend = VK_BLEND_FACTOR_ONE;
m_pipeline_state.blend_state.dst_blend = VK_BLEND_FACTOR_ZERO;
m_pipeline_state.blend_state.alpha_blend_op = VK_BLEND_OP_ADD;
m_pipeline_state.blend_state.src_alpha_blend = VK_BLEND_FACTOR_ONE;
m_pipeline_state.blend_state.dst_alpha_blend = VK_BLEND_FACTOR_ZERO;
m_pipeline_state.blend_state.logic_op_enable = VK_FALSE;
m_pipeline_state.blend_state.logic_op = VK_LOGIC_OP_CLEAR;
m_pipeline_state.blend_state.write_mask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
// Enable depth clamping if supported by driver.
if (g_ActiveConfig.backend_info.bSupportsDepthClamp)
m_pipeline_state.rasterization_state.depth_clamp = VK_TRUE;
// BBox is disabled by default.
m_pipeline_state.pipeline_layout = g_object_cache->GetStandardPipelineLayout();
m_num_active_descriptor_sets = NUM_DESCRIPTOR_SETS - 1;
m_bbox_enabled = false;
// Initialize all samplers to point by default
for (size_t i = 0; i < NUM_PIXEL_SHADER_SAMPLERS; i++)
{
m_bindings.ps_samplers[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
m_bindings.ps_samplers[i].imageView = VK_NULL_HANDLE;
m_bindings.ps_samplers[i].sampler = g_object_cache->GetPointSampler();
}
// Create the streaming uniform buffer
m_uniform_stream_buffer =
StreamBuffer::Create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, INITIAL_UNIFORM_STREAM_BUFFER_SIZE,
MAXIMUM_UNIFORM_STREAM_BUFFER_SIZE);
if (!m_uniform_stream_buffer)
PanicAlert("Failed to create uniform stream buffer");
// The validation layer complains if max(offsets) + max(ubo_ranges) >= ubo_size.
// To work around this we always reserve the worst-case size of all three constant
// blocks, but only commit as many bytes as we actually use.
m_uniform_buffer_reserve_size = sizeof(PixelShaderConstants);
m_uniform_buffer_reserve_size = Util::AlignValue(m_uniform_buffer_reserve_size,
g_vulkan_context->GetUniformBufferAlignment()) +
sizeof(VertexShaderConstants);
m_uniform_buffer_reserve_size = Util::AlignValue(m_uniform_buffer_reserve_size,
g_vulkan_context->GetUniformBufferAlignment()) +
sizeof(GeometryShaderConstants);
// Default dirty flags include all descriptors
InvalidateDescriptorSets();
SetPendingRebind();
// Set default constants
UploadAllConstants();
}
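// Worked example for the reserve-size computation above (hypothetical sizes): with a
// 256-byte uniform buffer alignment, sizeof(PixelShaderConstants) == 1024,
// sizeof(VertexShaderConstants) == 3072 and sizeof(GeometryShaderConstants) == 640:
//   AlignValue(1024, 256) + 3072 == 4096
//   AlignValue(4096, 256) + 640  == 4736
// so every per-stage update reserves 4736 bytes, enough to hold all three blocks at
// aligned offsets even though only one block is committed.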
StateTracker::~StateTracker()
{
}
void StateTracker::SetVertexBuffer(VkBuffer buffer, VkDeviceSize offset)
{
if (m_vertex_buffer == buffer && m_vertex_buffer_offset == offset)
return;
m_vertex_buffer = buffer;
m_vertex_buffer_offset = offset;
m_dirty_flags |= DIRTY_FLAG_VERTEX_BUFFER;
}
void StateTracker::SetIndexBuffer(VkBuffer buffer, VkDeviceSize offset, VkIndexType type)
{
if (m_index_buffer == buffer && m_index_buffer_offset == offset && m_index_type == type)
return;
m_index_buffer = buffer;
m_index_buffer_offset = offset;
m_index_type = type;
m_dirty_flags |= DIRTY_FLAG_INDEX_BUFFER;
}
void StateTracker::SetRenderPass(VkRenderPass render_pass)
{
// Should not be changed within a render pass.
_assert_(!m_in_render_pass);
if (m_pipeline_state.render_pass == render_pass)
return;
m_pipeline_state.render_pass = render_pass;
m_dirty_flags |= DIRTY_FLAG_PIPELINE;
}
void StateTracker::SetFramebuffer(VkFramebuffer framebuffer, const VkRect2D& render_area)
{
// Should not be changed within a render pass.
_assert_(!m_in_render_pass);
m_framebuffer = framebuffer;
m_framebuffer_render_area = render_area;
}
void StateTracker::SetVertexFormat(const VertexFormat* vertex_format)
{
if (m_pipeline_state.vertex_format == vertex_format)
return;
m_pipeline_state.vertex_format = vertex_format;
m_dirty_flags |= DIRTY_FLAG_PIPELINE;
}
void StateTracker::SetPrimitiveTopology(VkPrimitiveTopology primitive_topology)
{
if (m_pipeline_state.primitive_topology == primitive_topology)
return;
m_pipeline_state.primitive_topology = primitive_topology;
m_dirty_flags |= DIRTY_FLAG_PIPELINE;
}
void StateTracker::DisableBackFaceCulling()
{
if (m_pipeline_state.rasterization_state.cull_mode == VK_CULL_MODE_NONE)
return;
m_pipeline_state.rasterization_state.cull_mode = VK_CULL_MODE_NONE;
m_dirty_flags |= DIRTY_FLAG_PIPELINE;
}
void StateTracker::SetRasterizationState(const RasterizationState& state)
{
if (m_pipeline_state.rasterization_state.bits == state.bits)
return;
m_pipeline_state.rasterization_state.bits = state.bits;
m_dirty_flags |= DIRTY_FLAG_PIPELINE;
}
void StateTracker::SetDepthStencilState(const DepthStencilState& state)
{
if (m_pipeline_state.depth_stencil_state.bits == state.bits)
return;
m_pipeline_state.depth_stencil_state.bits = state.bits;
m_dirty_flags |= DIRTY_FLAG_PIPELINE;
}
void StateTracker::SetBlendState(const BlendState& state)
{
if (m_pipeline_state.blend_state.bits == state.bits)
return;
m_pipeline_state.blend_state.bits = state.bits;
m_dirty_flags |= DIRTY_FLAG_PIPELINE;
}
bool StateTracker::CheckForShaderChanges(u32 gx_primitive_type, DSTALPHA_MODE dstalpha_mode)
{
VertexShaderUid vs_uid = GetVertexShaderUid();
PixelShaderUid ps_uid = GetPixelShaderUid(dstalpha_mode);
bool changed = false;
if (vs_uid != m_vs_uid)
{
m_pipeline_state.vs = g_object_cache->GetVertexShaderForUid(vs_uid);
m_vs_uid = vs_uid;
changed = true;
}
if (g_vulkan_context->SupportsGeometryShaders())
{
GeometryShaderUid gs_uid = GetGeometryShaderUid(gx_primitive_type);
if (gs_uid != m_gs_uid)
{
if (gs_uid.GetUidData()->IsPassthrough())
m_pipeline_state.gs = VK_NULL_HANDLE;
else
m_pipeline_state.gs = g_object_cache->GetGeometryShaderForUid(gs_uid);
m_gs_uid = gs_uid;
changed = true;
}
}
if (ps_uid != m_ps_uid)
{
m_pipeline_state.ps = g_object_cache->GetPixelShaderForUid(ps_uid, dstalpha_mode);
m_ps_uid = ps_uid;
changed = true;
}
if (m_dstalpha_mode != dstalpha_mode)
{
// Switching to/from alpha pass requires a pipeline change, since the blend state
// is overridden in the destination alpha pass.
if (m_dstalpha_mode == DSTALPHA_ALPHA_PASS || dstalpha_mode == DSTALPHA_ALPHA_PASS)
changed = true;
m_dstalpha_mode = dstalpha_mode;
}
if (changed)
m_dirty_flags |= DIRTY_FLAG_PIPELINE;
return changed;
}
void StateTracker::UpdateVertexShaderConstants()
{
if (!VertexShaderManager::dirty)
return;
// Since the other stages' uniform buffers may still be using the earlier data,
// we can't reuse the earlier part of the buffer without re-uploading everything.
if (!m_uniform_stream_buffer->ReserveMemory(m_uniform_buffer_reserve_size,
g_vulkan_context->GetUniformBufferAlignment(), false,
false, false))
{
// Re-upload all constants to a new portion of the buffer.
UploadAllConstants();
return;
}
// Buffer allocation changed?
if (m_uniform_stream_buffer->GetBuffer() !=
m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_VS].buffer)
{
m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_VS].buffer =
m_uniform_stream_buffer->GetBuffer();
m_dirty_flags |= DIRTY_FLAG_VS_UBO;
}
m_bindings.uniform_buffer_offsets[UBO_DESCRIPTOR_SET_BINDING_VS] =
static_cast<uint32_t>(m_uniform_stream_buffer->GetCurrentOffset());
m_dirty_flags |= DIRTY_FLAG_DYNAMIC_OFFSETS;
memcpy(m_uniform_stream_buffer->GetCurrentHostPointer(), &VertexShaderManager::constants,
sizeof(VertexShaderConstants));
ADDSTAT(stats.thisFrame.bytesUniformStreamed, sizeof(VertexShaderConstants));
m_uniform_stream_buffer->CommitMemory(sizeof(VertexShaderConstants));
VertexShaderManager::dirty = false;
}
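// Cost summary for the update path above (applies equally to the GS/PS variants below):
//   - offset moved within the same VkBuffer -> only DIRTY_FLAG_DYNAMIC_OFFSETS is set,
//     so Bind() re-binds the existing descriptor set with new dynamic offsets.
//   - VkBuffer handle changed (stream buffer grew) -> DIRTY_FLAG_VS_UBO is also set,
//     so Bind() must allocate and write a fresh descriptor set.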
void StateTracker::UpdateGeometryShaderConstants()
{
// Skip updating geometry shader constants if it's not in use.
if (m_pipeline_state.gs == VK_NULL_HANDLE || !GeometryShaderManager::dirty)
return;
// Since the other stages' uniform buffers may still be using the earlier data,
// we can't reuse the earlier part of the buffer without re-uploading everything.
if (!m_uniform_stream_buffer->ReserveMemory(m_uniform_buffer_reserve_size,
g_vulkan_context->GetUniformBufferAlignment(), false,
false, false))
{
// Re-upload all constants to a new portion of the buffer.
UploadAllConstants();
return;
}
// Buffer allocation changed?
if (m_uniform_stream_buffer->GetBuffer() !=
m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_GS].buffer)
{
m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_GS].buffer =
m_uniform_stream_buffer->GetBuffer();
m_dirty_flags |= DIRTY_FLAG_GS_UBO;
}
m_bindings.uniform_buffer_offsets[UBO_DESCRIPTOR_SET_BINDING_GS] =
static_cast<uint32_t>(m_uniform_stream_buffer->GetCurrentOffset());
m_dirty_flags |= DIRTY_FLAG_DYNAMIC_OFFSETS;
memcpy(m_uniform_stream_buffer->GetCurrentHostPointer(), &GeometryShaderManager::constants,
sizeof(GeometryShaderConstants));
ADDSTAT(stats.thisFrame.bytesUniformStreamed, sizeof(GeometryShaderConstants));
m_uniform_stream_buffer->CommitMemory(sizeof(GeometryShaderConstants));
GeometryShaderManager::dirty = false;
}
void StateTracker::UpdatePixelShaderConstants()
{
if (!PixelShaderManager::dirty)
return;
// Since the other stages' uniform buffers may still be using the earlier data,
// we can't reuse the earlier part of the buffer without re-uploading everything.
if (!m_uniform_stream_buffer->ReserveMemory(m_uniform_buffer_reserve_size,
g_vulkan_context->GetUniformBufferAlignment(), false,
false, false))
{
// Re-upload all constants to a new portion of the buffer.
UploadAllConstants();
return;
}
// Buffer allocation changed?
if (m_uniform_stream_buffer->GetBuffer() !=
m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_PS].buffer)
{
m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_PS].buffer =
m_uniform_stream_buffer->GetBuffer();
m_dirty_flags |= DIRTY_FLAG_PS_UBO;
}
m_bindings.uniform_buffer_offsets[UBO_DESCRIPTOR_SET_BINDING_PS] =
static_cast<uint32_t>(m_uniform_stream_buffer->GetCurrentOffset());
m_dirty_flags |= DIRTY_FLAG_DYNAMIC_OFFSETS;
memcpy(m_uniform_stream_buffer->GetCurrentHostPointer(), &PixelShaderManager::constants,
sizeof(PixelShaderConstants));
ADDSTAT(stats.thisFrame.bytesUniformStreamed, sizeof(PixelShaderConstants));
m_uniform_stream_buffer->CommitMemory(sizeof(PixelShaderConstants));
PixelShaderManager::dirty = false;
}
void StateTracker::UploadAllConstants()
{
// We are free to re-use parts of the buffer now since we're uploading all constants.
size_t pixel_constants_offset = 0;
size_t vertex_constants_offset =
Util::AlignValue(pixel_constants_offset + sizeof(PixelShaderConstants),
g_vulkan_context->GetUniformBufferAlignment());
size_t geometry_constants_offset =
Util::AlignValue(vertex_constants_offset + sizeof(VertexShaderConstants),
g_vulkan_context->GetUniformBufferAlignment());
size_t total_allocation_size = geometry_constants_offset + sizeof(GeometryShaderConstants);
// Allocate everything at once.
if (!m_uniform_stream_buffer->ReserveMemory(
total_allocation_size, g_vulkan_context->GetUniformBufferAlignment(), true, true, false))
{
// If this fails, wait until the GPU has caught up.
// Constant updates are only invoked from places where it is safe to restore state.
WARN_LOG(VIDEO, "Executing command buffer while waiting for space in uniform buffer");
Util::ExecuteCurrentCommandsAndRestoreState(this, false);
if (!m_uniform_stream_buffer->ReserveMemory(total_allocation_size,
g_vulkan_context->GetUniformBufferAlignment(), true,
true, false))
{
PanicAlert("Failed to allocate space for constants in streaming buffer");
return;
}
}
// Update bindings
for (size_t i = 0; i < NUM_UBO_DESCRIPTOR_SET_BINDINGS; i++)
{
m_bindings.uniform_buffer_bindings[i].buffer = m_uniform_stream_buffer->GetBuffer();
m_bindings.uniform_buffer_bindings[i].offset = 0;
}
m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_PS].range =
sizeof(PixelShaderConstants);
m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_VS].range =
sizeof(VertexShaderConstants);
m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_GS].range =
sizeof(GeometryShaderConstants);
// Update dynamic offsets
m_bindings.uniform_buffer_offsets[UBO_DESCRIPTOR_SET_BINDING_PS] =
static_cast<uint32_t>(m_uniform_stream_buffer->GetCurrentOffset() + pixel_constants_offset);
m_bindings.uniform_buffer_offsets[UBO_DESCRIPTOR_SET_BINDING_VS] =
static_cast<uint32_t>(m_uniform_stream_buffer->GetCurrentOffset() + vertex_constants_offset);
m_bindings.uniform_buffer_offsets[UBO_DESCRIPTOR_SET_BINDING_GS] = static_cast<uint32_t>(
m_uniform_stream_buffer->GetCurrentOffset() + geometry_constants_offset);
m_dirty_flags |= DIRTY_FLAG_ALL_DESCRIPTOR_SETS | DIRTY_FLAG_DYNAMIC_OFFSETS | DIRTY_FLAG_VS_UBO |
DIRTY_FLAG_GS_UBO | DIRTY_FLAG_PS_UBO;
// Copy the actual data in
memcpy(m_uniform_stream_buffer->GetCurrentHostPointer() + pixel_constants_offset,
&PixelShaderManager::constants, sizeof(PixelShaderConstants));
memcpy(m_uniform_stream_buffer->GetCurrentHostPointer() + vertex_constants_offset,
&VertexShaderManager::constants, sizeof(VertexShaderConstants));
memcpy(m_uniform_stream_buffer->GetCurrentHostPointer() + geometry_constants_offset,
&GeometryShaderManager::constants, sizeof(GeometryShaderConstants));
// Finally, flush buffer memory after copying
m_uniform_stream_buffer->CommitMemory(total_allocation_size);
// Clear dirty flags
VertexShaderManager::dirty = false;
GeometryShaderManager::dirty = false;
PixelShaderManager::dirty = false;
}
void StateTracker::SetTexture(size_t index, VkImageView view)
{
if (m_bindings.ps_samplers[index].imageView == view)
return;
m_bindings.ps_samplers[index].imageView = view;
m_bindings.ps_samplers[index].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
m_dirty_flags |= DIRTY_FLAG_PS_SAMPLERS;
}
void StateTracker::SetSampler(size_t index, VkSampler sampler)
{
if (m_bindings.ps_samplers[index].sampler == sampler)
return;
m_bindings.ps_samplers[index].sampler = sampler;
m_dirty_flags |= DIRTY_FLAG_PS_SAMPLERS;
}
void StateTracker::SetBBoxEnable(bool enable)
{
if (m_bbox_enabled == enable)
return;
// Change the number of active descriptor sets, as well as the pipeline layout
if (enable)
{
m_pipeline_state.pipeline_layout = g_object_cache->GetBBoxPipelineLayout();
m_num_active_descriptor_sets = NUM_DESCRIPTOR_SETS;
// The bbox buffer never changes, so we defer descriptor updates until it is enabled.
if (m_descriptor_sets[DESCRIPTOR_SET_SHADER_STORAGE_BUFFERS] == VK_NULL_HANDLE)
m_dirty_flags |= DIRTY_FLAG_PS_SSBO;
}
else
{
m_pipeline_state.pipeline_layout = g_object_cache->GetStandardPipelineLayout();
m_num_active_descriptor_sets = NUM_DESCRIPTOR_SETS - 1;
}
m_dirty_flags |= DIRTY_FLAG_PIPELINE | DIRTY_FLAG_DESCRIPTOR_SET_BINDING;
m_bbox_enabled = enable;
}
void StateTracker::SetBBoxBuffer(VkBuffer buffer, VkDeviceSize offset, VkDeviceSize range)
{
if (m_bindings.ps_ssbo.buffer == buffer && m_bindings.ps_ssbo.offset == offset &&
m_bindings.ps_ssbo.range == range)
{
return;
}
m_bindings.ps_ssbo.buffer = buffer;
m_bindings.ps_ssbo.offset = offset;
m_bindings.ps_ssbo.range = range;
// Defer descriptor update until bbox is actually enabled.
if (m_bbox_enabled)
m_dirty_flags |= DIRTY_FLAG_PS_SSBO;
}
void StateTracker::UnbindTexture(VkImageView view)
{
for (VkDescriptorImageInfo& it : m_bindings.ps_samplers)
{
if (it.imageView == view)
it.imageView = VK_NULL_HANDLE;
}
}
void StateTracker::InvalidateDescriptorSets()
{
m_descriptor_sets.fill(VK_NULL_HANDLE);
m_dirty_flags |= DIRTY_FLAG_ALL_DESCRIPTOR_SETS;
// Defer SSBO descriptor update until bbox is actually enabled.
if (!m_bbox_enabled)
m_dirty_flags &= ~DIRTY_FLAG_PS_SSBO;
}
void StateTracker::SetPendingRebind()
{
m_dirty_flags |= DIRTY_FLAG_DYNAMIC_OFFSETS | DIRTY_FLAG_DESCRIPTOR_SET_BINDING |
DIRTY_FLAG_PIPELINE_BINDING | DIRTY_FLAG_VERTEX_BUFFER |
DIRTY_FLAG_INDEX_BUFFER | DIRTY_FLAG_VIEWPORT | DIRTY_FLAG_SCISSOR |
DIRTY_FLAG_PIPELINE;
}
void StateTracker::BeginRenderPass()
{
if (m_in_render_pass)
return;
VkRenderPassBeginInfo begin_info = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
nullptr,
m_pipeline_state.render_pass,
m_framebuffer,
m_framebuffer_render_area,
0,
nullptr};
vkCmdBeginRenderPass(g_command_buffer_mgr->GetCurrentCommandBuffer(), &begin_info,
VK_SUBPASS_CONTENTS_INLINE);
m_in_render_pass = true;
}
void StateTracker::EndRenderPass()
{
if (!m_in_render_pass)
return;
vkCmdEndRenderPass(g_command_buffer_mgr->GetCurrentCommandBuffer());
m_in_render_pass = false;
}
void StateTracker::SetViewport(const VkViewport& viewport)
{
if (memcmp(&m_viewport, &viewport, sizeof(viewport)) == 0)
return;
m_viewport = viewport;
m_dirty_flags |= DIRTY_FLAG_VIEWPORT;
}
void StateTracker::SetScissor(const VkRect2D& scissor)
{
if (memcmp(&m_scissor, &scissor, sizeof(scissor)) == 0)
return;
m_scissor = scissor;
m_dirty_flags |= DIRTY_FLAG_SCISSOR;
}
bool StateTracker::Bind(bool rebind_all /*= false*/)
{
// Get new pipeline object if any parts have changed
if (m_dirty_flags & DIRTY_FLAG_PIPELINE && !UpdatePipeline())
{
ERROR_LOG(VIDEO, "Failed to get pipeline object, skipping draw");
return false;
}
// Get a new descriptor set if any parts have changed
if (m_dirty_flags & DIRTY_FLAG_ALL_DESCRIPTOR_SETS && !UpdateDescriptorSet())
{
// We can fail to allocate descriptors if we exhaust the pool for this command buffer.
WARN_LOG(VIDEO, "Failed to get a descriptor set, executing buffer");
// Try again after executing the current buffer.
g_command_buffer_mgr->ExecuteCommandBuffer(false, false);
InvalidateDescriptorSets();
SetPendingRebind();
if (!UpdateDescriptorSet())
{
// Something strange is going on.
ERROR_LOG(VIDEO, "Failed to get descriptor set, skipping draw");
return false;
}
}
// Start render pass if not already started
if (!m_in_render_pass)
BeginRenderPass();
// Re-bind parts of the pipeline
VkCommandBuffer command_buffer = g_command_buffer_mgr->GetCurrentCommandBuffer();
if (m_dirty_flags & DIRTY_FLAG_VERTEX_BUFFER || rebind_all)
vkCmdBindVertexBuffers(command_buffer, 0, 1, &m_vertex_buffer, &m_vertex_buffer_offset);
if (m_dirty_flags & DIRTY_FLAG_INDEX_BUFFER || rebind_all)
vkCmdBindIndexBuffer(command_buffer, m_index_buffer, m_index_buffer_offset, m_index_type);
if (m_dirty_flags & DIRTY_FLAG_PIPELINE_BINDING || rebind_all)
vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline_object);
if (m_dirty_flags & DIRTY_FLAG_DESCRIPTOR_SET_BINDING || rebind_all)
{
vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
m_pipeline_state.pipeline_layout, 0, m_num_active_descriptor_sets,
m_descriptor_sets.data(), NUM_UBO_DESCRIPTOR_SET_BINDINGS,
m_bindings.uniform_buffer_offsets.data());
}
else if (m_dirty_flags & DIRTY_FLAG_DYNAMIC_OFFSETS)
{
vkCmdBindDescriptorSets(
command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipeline_state.pipeline_layout,
DESCRIPTOR_SET_UNIFORM_BUFFERS, 1, &m_descriptor_sets[DESCRIPTOR_SET_UNIFORM_BUFFERS],
NUM_UBO_DESCRIPTOR_SET_BINDINGS, m_bindings.uniform_buffer_offsets.data());
}
if (m_dirty_flags & DIRTY_FLAG_VIEWPORT || rebind_all)
vkCmdSetViewport(command_buffer, 0, 1, &m_viewport);
if (m_dirty_flags & DIRTY_FLAG_SCISSOR || rebind_all)
vkCmdSetScissor(command_buffer, 0, 1, &m_scissor);
m_dirty_flags = 0;
return true;
}
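// Typical per-draw flow (hypothetical caller sketch; the real callers live in the
// renderer/vertex manager): set state, Bind(), then issue the draw.
//
//   state_tracker->SetVertexBuffer(vertex_buffer, 0);
//   state_tracker->SetIndexBuffer(index_buffer, 0, VK_INDEX_TYPE_UINT16);
//   if (state_tracker->Bind())
//   {
//     vkCmdDrawIndexed(g_command_buffer_mgr->GetCurrentCommandBuffer(), index_count, 1, 0, 0, 0);
//     state_tracker->OnDraw();
//   }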
void StateTracker::OnDraw()
{
m_draw_counter++;
// If we didn't have any CPU access last frame, do nothing.
if (m_scheduled_command_buffer_kicks.empty() || !m_allow_background_execution)
return;
// Check if this draw is scheduled to kick a command buffer.
// The draw counters will always be sorted so a binary search is possible here.
if (std::binary_search(m_scheduled_command_buffer_kicks.begin(),
m_scheduled_command_buffer_kicks.end(), m_draw_counter))
{
// Kick a command buffer on the background thread.
EndRenderPass();
g_command_buffer_mgr->ExecuteCommandBuffer(true, false);
InvalidateDescriptorSets();
SetPendingRebind();
}
}
void StateTracker::OnReadback()
{
// Check that this isn't another access without any draws in between.
if (!m_cpu_accesses_this_frame.empty() && m_cpu_accesses_this_frame.back() == m_draw_counter)
return;
// Store the current draw counter for scheduling in OnEndFrame.
m_cpu_accesses_this_frame.emplace_back(m_draw_counter);
}
void StateTracker::OnEndFrame()
{
m_draw_counter = 0;
m_scheduled_command_buffer_kicks.clear();
// If we have no CPU access at all, leave everything in the one command buffer for maximum
// parallelism between CPU/GPU, at the cost of slightly higher latency.
if (m_cpu_accesses_this_frame.empty())
return;
// In order to reduce CPU readback latency, we want to kick a command buffer roughly halfway
// between the draw counters that invoked the readback, or every iCommandBufferExecuteInterval
// draws, whichever is smaller.
if (g_ActiveConfig.iCommandBufferExecuteInterval > 0)
{
u32 last_draw_counter = 0;
u32 interval = static_cast<u32>(g_ActiveConfig.iCommandBufferExecuteInterval);
for (u32 draw_counter : m_cpu_accesses_this_frame)
{
u32 draw_count = draw_counter - last_draw_counter;
if (draw_count <= interval)
{
u32 mid_point = draw_count / 2;
m_scheduled_command_buffer_kicks.emplace_back(last_draw_counter + mid_point);
}
else
{
u32 counter = interval;
while (counter < draw_count)
{
m_scheduled_command_buffer_kicks.emplace_back(last_draw_counter + counter);
counter += interval;
}
}
}
}
#if 0
{
std::stringstream ss;
std::for_each(m_cpu_accesses_this_frame.begin(), m_cpu_accesses_this_frame.end(), [&ss](u32 idx) { ss << idx << ","; });
WARN_LOG(VIDEO, "CPU EFB accesses in last frame: %s", ss.str().c_str());
}
{
std::stringstream ss;
std::for_each(m_scheduled_command_buffer_kicks.begin(), m_scheduled_command_buffer_kicks.end(), [&ss](u32 idx) { ss << idx << ","; });
WARN_LOG(VIDEO, "Scheduled command buffer kicks: %s", ss.str().c_str());
}
#endif
m_cpu_accesses_this_frame.clear();
}
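// Worked example for the scheduling loop above (hypothetical numbers): with
// iCommandBufferExecuteInterval == 250 and CPU accesses at draws {100, 400}:
//   access 100: draw_count == 100 <= 250 -> kick at 0 + 100/2 == 50
//   access 400: draw_count == 400 >  250 -> kick at 0 + 250 == 250 (next step 500 >= 400 stops)
// giving m_scheduled_command_buffer_kicks == {50, 250}. Note that last_draw_counter is never
// advanced, so spacing is measured from the start of the frame rather than the previous access.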
void StateTracker::SetBackgroundCommandBufferExecution(bool enabled)
{
m_allow_background_execution = enabled;
}
bool StateTracker::UpdatePipeline()
{
// We need at least a vertex and fragment shader
if (m_pipeline_state.vs == VK_NULL_HANDLE || m_pipeline_state.ps == VK_NULL_HANDLE)
return false;
// Grab a new pipeline object, this can fail
if (m_dstalpha_mode != DSTALPHA_ALPHA_PASS)
{
m_pipeline_object = g_object_cache->GetPipeline(m_pipeline_state);
if (m_pipeline_object == VK_NULL_HANDLE)
return false;
}
else
{
// We need to make a few modifications to the pipeline object, but retain
// the existing state, since we don't want to break the next draw.
PipelineInfo temp_info = m_pipeline_state;
// Skip depth writes for this pass. The results will be the same, so no
// point in overwriting depth values with the same value.
temp_info.depth_stencil_state.write_enable = VK_FALSE;
// Only allow alpha writes, and disable blending.
temp_info.blend_state.blend_enable = VK_FALSE;
temp_info.blend_state.logic_op_enable = VK_FALSE;
temp_info.blend_state.write_mask = VK_COLOR_COMPONENT_A_BIT;
m_pipeline_object = g_object_cache->GetPipeline(temp_info);
if (m_pipeline_object == VK_NULL_HANDLE)
return false;
}
m_dirty_flags |= DIRTY_FLAG_PIPELINE_BINDING;
return true;
}
bool StateTracker::UpdateDescriptorSet()
{
const size_t MAX_DESCRIPTOR_WRITES = NUM_UBO_DESCRIPTOR_SET_BINDINGS + // UBO
NUM_PIXEL_SHADER_SAMPLERS + // Samplers
1; // SSBO
std::array<VkWriteDescriptorSet, MAX_DESCRIPTOR_WRITES> writes;
u32 num_writes = 0;
if (m_dirty_flags & (DIRTY_FLAG_VS_UBO | DIRTY_FLAG_GS_UBO | DIRTY_FLAG_PS_UBO) ||
m_descriptor_sets[DESCRIPTOR_SET_UNIFORM_BUFFERS] == VK_NULL_HANDLE)
{
VkDescriptorSetLayout layout =
g_object_cache->GetDescriptorSetLayout(DESCRIPTOR_SET_UNIFORM_BUFFERS);
VkDescriptorSet set = g_command_buffer_mgr->AllocateDescriptorSet(layout);
if (set == VK_NULL_HANDLE)
return false;
for (size_t i = 0; i < NUM_UBO_DESCRIPTOR_SET_BINDINGS; i++)
{
writes[num_writes++] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
nullptr,
set,
static_cast<uint32_t>(i),
0,
1,
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
nullptr,
&m_bindings.uniform_buffer_bindings[i],
nullptr};
}
m_descriptor_sets[DESCRIPTOR_SET_UNIFORM_BUFFERS] = set;
m_dirty_flags |= DIRTY_FLAG_DESCRIPTOR_SET_BINDING;
}
if (m_dirty_flags & DIRTY_FLAG_PS_SAMPLERS ||
m_descriptor_sets[DESCRIPTOR_SET_PIXEL_SHADER_SAMPLERS] == VK_NULL_HANDLE)
{
VkDescriptorSetLayout layout =
g_object_cache->GetDescriptorSetLayout(DESCRIPTOR_SET_PIXEL_SHADER_SAMPLERS);
VkDescriptorSet set = g_command_buffer_mgr->AllocateDescriptorSet(layout);
if (set == VK_NULL_HANDLE)
return false;
for (size_t i = 0; i < NUM_PIXEL_SHADER_SAMPLERS; i++)
{
const VkDescriptorImageInfo& info = m_bindings.ps_samplers[i];
if (info.imageView != VK_NULL_HANDLE && info.sampler != VK_NULL_HANDLE)
{
writes[num_writes++] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
nullptr,
set,
static_cast<uint32_t>(i),
0,
1,
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
&info,
nullptr,
nullptr};
}
}
m_descriptor_sets[DESCRIPTOR_SET_PIXEL_SHADER_SAMPLERS] = set;
m_dirty_flags |= DIRTY_FLAG_DESCRIPTOR_SET_BINDING;
}
if (m_bbox_enabled &&
(m_dirty_flags & DIRTY_FLAG_PS_SSBO ||
m_descriptor_sets[DESCRIPTOR_SET_SHADER_STORAGE_BUFFERS] == VK_NULL_HANDLE))
{
VkDescriptorSetLayout layout =
g_object_cache->GetDescriptorSetLayout(DESCRIPTOR_SET_SHADER_STORAGE_BUFFERS);
VkDescriptorSet set = g_command_buffer_mgr->AllocateDescriptorSet(layout);
if (set == VK_NULL_HANDLE)
return false;
writes[num_writes++] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
nullptr,
set,
0,
0,
1,
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
nullptr,
&m_bindings.ps_ssbo,
nullptr};
m_descriptor_sets[DESCRIPTOR_SET_SHADER_STORAGE_BUFFERS] = set;
m_dirty_flags |= DIRTY_FLAG_DESCRIPTOR_SET_BINDING;
}
if (num_writes > 0)
vkUpdateDescriptorSets(g_vulkan_context->GetDevice(), num_writes, writes.data(), 0, nullptr);
return true;
}
} // namespace Vulkan

View File

@ -0,0 +1,175 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <memory>
#include <vector>
#include "VideoBackends/Vulkan/Constants.h"
#include "VideoCommon/GeometryShaderGen.h"
#include "VideoCommon/PixelShaderGen.h"
#include "VideoCommon/RenderBase.h"
#include "VideoCommon/VertexShaderGen.h"
namespace Vulkan
{
class StreamBuffer;
class VertexFormat;
class StateTracker
{
public:
StateTracker();
~StateTracker();
const RasterizationState& GetRasterizationState() const
{
return m_pipeline_state.rasterization_state;
}
const DepthStencilState& GetDepthStencilState() const
{
return m_pipeline_state.depth_stencil_state;
}
const BlendState& GetBlendState() const { return m_pipeline_state.blend_state; }
void SetVertexBuffer(VkBuffer buffer, VkDeviceSize offset);
void SetIndexBuffer(VkBuffer buffer, VkDeviceSize offset, VkIndexType type);
void SetRenderPass(VkRenderPass render_pass);
void SetFramebuffer(VkFramebuffer framebuffer, const VkRect2D& render_area);
void SetVertexFormat(const VertexFormat* vertex_format);
void SetPrimitiveTopology(VkPrimitiveTopology primitive_topology);
void DisableBackFaceCulling();
void SetRasterizationState(const RasterizationState& state);
void SetDepthStencilState(const DepthStencilState& state);
void SetBlendState(const BlendState& state);
bool CheckForShaderChanges(u32 gx_primitive_type, DSTALPHA_MODE dstalpha_mode);
void UpdateVertexShaderConstants();
void UpdateGeometryShaderConstants();
void UpdatePixelShaderConstants();
void SetTexture(size_t index, VkImageView view);
void SetSampler(size_t index, VkSampler sampler);
void SetBBoxEnable(bool enable);
void SetBBoxBuffer(VkBuffer buffer, VkDeviceSize offset, VkDeviceSize range);
void UnbindTexture(VkImageView view);
// When executing a command buffer, we want to recreate the descriptor set, as it will
// now be in a different pool for the new command buffer.
void InvalidateDescriptorSets();
// Set dirty flags on everything to force re-bind at next draw time.
void SetPendingRebind();
bool InRenderPass() const { return m_in_render_pass; }
void BeginRenderPass();
// Ends a render pass if we're currently in one.
// When Bind() is next called, the pass will be restarted.
// Calling EndRenderPass() is allowed even if a pass has not begun.
void EndRenderPass();
void SetViewport(const VkViewport& viewport);
void SetScissor(const VkRect2D& scissor);
bool Bind(bool rebind_all = false);
// CPU Access Tracking
// Call after a draw call is made.
void OnDraw();
// Call after CPU access is requested.
// This can be via EFBCache or EFB2RAM.
void OnReadback();
// Call at the end of a frame.
void OnEndFrame();
// Prevent/allow background command buffer execution.
// Use when queries are active.
void SetBackgroundCommandBufferExecution(bool enabled);
private:
bool UpdatePipeline();
bool UpdateDescriptorSet();
void UploadAllConstants();
enum DIRTY_FLAG : u32
{
DIRTY_FLAG_VS_UBO = (1 << 0),
DIRTY_FLAG_GS_UBO = (1 << 1),
DIRTY_FLAG_PS_UBO = (1 << 2),
DIRTY_FLAG_PS_SAMPLERS = (1 << 3),
DIRTY_FLAG_PS_SSBO = (1 << 4),
DIRTY_FLAG_DYNAMIC_OFFSETS = (1 << 5),
DIRTY_FLAG_VERTEX_BUFFER = (1 << 6),
DIRTY_FLAG_INDEX_BUFFER = (1 << 7),
DIRTY_FLAG_VIEWPORT = (1 << 8),
DIRTY_FLAG_SCISSOR = (1 << 9),
DIRTY_FLAG_PIPELINE = (1 << 10),
DIRTY_FLAG_DESCRIPTOR_SET_BINDING = (1 << 11),
DIRTY_FLAG_PIPELINE_BINDING = (1 << 12),
DIRTY_FLAG_ALL_DESCRIPTOR_SETS =
DIRTY_FLAG_VS_UBO | DIRTY_FLAG_GS_UBO | DIRTY_FLAG_PS_SAMPLERS | DIRTY_FLAG_PS_SSBO
};
u32 m_dirty_flags = 0;
// input assembly
VkBuffer m_vertex_buffer = VK_NULL_HANDLE;
VkDeviceSize m_vertex_buffer_offset = 0;
VkBuffer m_index_buffer = VK_NULL_HANDLE;
VkDeviceSize m_index_buffer_offset = 0;
VkIndexType m_index_type = VK_INDEX_TYPE_UINT16;
// shader state
VertexShaderUid m_vs_uid = {};
GeometryShaderUid m_gs_uid = {};
PixelShaderUid m_ps_uid = {};
// pipeline state
PipelineInfo m_pipeline_state = {};
DSTALPHA_MODE m_dstalpha_mode = DSTALPHA_NONE;
VkPipeline m_pipeline_object = VK_NULL_HANDLE;
// shader bindings
std::array<VkDescriptorSet, NUM_DESCRIPTOR_SETS> m_descriptor_sets = {};
struct
{
std::array<VkDescriptorBufferInfo, NUM_UBO_DESCRIPTOR_SET_BINDINGS> uniform_buffer_bindings =
{};
std::array<uint32_t, NUM_UBO_DESCRIPTOR_SET_BINDINGS> uniform_buffer_offsets = {};
std::array<VkDescriptorImageInfo, NUM_PIXEL_SHADER_SAMPLERS> ps_samplers = {};
VkDescriptorBufferInfo ps_ssbo = {};
} m_bindings;
u32 m_num_active_descriptor_sets = 0;
size_t m_uniform_buffer_reserve_size = 0;
// rasterization
VkViewport m_viewport = {0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f};
VkRect2D m_scissor = {{0, 0}, {1, 1}};
// uniform buffers
std::unique_ptr<StreamBuffer> m_uniform_stream_buffer;
VkFramebuffer m_framebuffer = VK_NULL_HANDLE;
VkRect2D m_framebuffer_render_area = {};
bool m_in_render_pass = false;
bool m_bbox_enabled = false;
// CPU access tracking
u32 m_draw_counter = 0;
std::vector<u32> m_cpu_accesses_this_frame;
std::vector<u32> m_scheduled_command_buffer_kicks;
bool m_allow_background_execution = true;
};
} // namespace Vulkan

View File

@ -0,0 +1,345 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include "Common/Assert.h"
#include "Common/MsgHandler.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/StreamBuffer.h"
#include "VideoBackends/Vulkan/Util.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
namespace Vulkan
{
StreamBuffer::StreamBuffer(VkBufferUsageFlags usage, size_t max_size)
: m_usage(usage), m_maximum_size(max_size)
{
// Add a callback that fires on fence point creation and signal
g_command_buffer_mgr->AddFencePointCallback(
this, std::bind(&StreamBuffer::OnCommandBufferQueued, this, std::placeholders::_1,
std::placeholders::_2),
std::bind(&StreamBuffer::OnCommandBufferExecuted, this, std::placeholders::_1));
}
StreamBuffer::~StreamBuffer()
{
g_command_buffer_mgr->RemoveFencePointCallback(this);
if (m_host_pointer)
vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
if (m_buffer != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_buffer);
if (m_memory != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_memory);
}
std::unique_ptr<StreamBuffer> StreamBuffer::Create(VkBufferUsageFlags usage, size_t initial_size,
size_t max_size)
{
std::unique_ptr<StreamBuffer> buffer = std::make_unique<StreamBuffer>(usage, max_size);
if (!buffer->ResizeBuffer(initial_size))
return nullptr;
return buffer;
}
bool StreamBuffer::ResizeBuffer(size_t size)
{
// Create the buffer descriptor
VkBufferCreateInfo buffer_create_info = {
VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
0, // VkBufferCreateFlags flags
static_cast<VkDeviceSize>(size), // VkDeviceSize size
m_usage, // VkBufferUsageFlags usage
VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode
0, // uint32_t queueFamilyIndexCount
nullptr // const uint32_t* pQueueFamilyIndices
};
VkBuffer buffer = VK_NULL_HANDLE;
VkResult res =
vkCreateBuffer(g_vulkan_context->GetDevice(), &buffer_create_info, nullptr, &buffer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBuffer failed: ");
return false;
}
// Get memory requirements (types etc) for this buffer
VkMemoryRequirements memory_requirements;
vkGetBufferMemoryRequirements(g_vulkan_context->GetDevice(), buffer, &memory_requirements);
// Aim for a coherent mapping if possible.
u32 memory_type_index = g_vulkan_context->GetUploadMemoryType(memory_requirements.memoryTypeBits,
&m_coherent_mapping);
// Allocate memory for backing this buffer
VkMemoryAllocateInfo memory_allocate_info = {
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
memory_requirements.size, // VkDeviceSize allocationSize
memory_type_index // uint32_t memoryTypeIndex
};
VkDeviceMemory memory = VK_NULL_HANDLE;
res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_allocate_info, nullptr, &memory);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
return false;
}
// Bind memory to buffer
res = vkBindBufferMemory(g_vulkan_context->GetDevice(), buffer, memory, 0);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkBindBufferMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr);
return false;
}
// Map this buffer into user-space
void* mapped_ptr = nullptr;
res = vkMapMemory(g_vulkan_context->GetDevice(), memory, 0, size, 0, &mapped_ptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkMapMemory failed: ");
vkDestroyBuffer(g_vulkan_context->GetDevice(), buffer, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), memory, nullptr);
return false;
}
// Unmap current host pointer (if there was a previous buffer)
if (m_host_pointer)
vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);
// Destroy the backings for the buffer after the command buffer executes
if (m_buffer != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_buffer);
if (m_memory != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_memory);
// Replace with the new buffer
m_buffer = buffer;
m_memory = memory;
m_host_pointer = reinterpret_cast<u8*>(mapped_ptr);
m_current_size = size;
m_current_offset = 0;
m_current_gpu_position = 0;
m_tracked_fences.clear();
return true;
}
bool StreamBuffer::ReserveMemory(size_t num_bytes, size_t alignment, bool allow_reuse /* = true */,
bool allow_growth /* = true */,
bool reallocate_if_full /* = false */)
{
size_t required_bytes = num_bytes + alignment;
// Check for sane allocations
if (required_bytes > m_maximum_size)
{
PanicAlert("Attempting to allocate %u bytes from a %u byte stream buffer",
static_cast<uint32_t>(num_bytes), static_cast<uint32_t>(m_maximum_size));
return false;
}
// Is the GPU behind or up to date with our current offset?
if (m_current_offset >= m_current_gpu_position)
{
size_t remaining_bytes = m_current_size - m_current_offset;
if (required_bytes <= remaining_bytes)
{
// Place at the current position, after the GPU position.
m_current_offset = Util::AlignBufferOffset(m_current_offset, alignment);
m_last_allocation_size = num_bytes;
return true;
}
// Check for space at the start of the buffer
// We use < here because we don't want to have the case of m_current_offset ==
// m_current_gpu_position. That would mean the code above would assume the
// GPU has caught up to us, which it hasn't.
if (allow_reuse && required_bytes < m_current_gpu_position)
{
// Reset offset to zero, since we're allocating behind the gpu now
m_current_offset = 0;
m_last_allocation_size = num_bytes;
return true;
}
}
// Is the GPU ahead of our current offset?
if (m_current_offset < m_current_gpu_position)
{
// We have from m_current_offset..m_current_gpu_position space to use.
size_t remaining_bytes = m_current_gpu_position - m_current_offset;
if (required_bytes < remaining_bytes)
{
// Place at the current position, since this is still behind the GPU.
m_current_offset = Util::AlignBufferOffset(m_current_offset, alignment);
m_last_allocation_size = num_bytes;
return true;
}
}
// Try to grow the buffer up to the maximum size before waiting.
// Double each time until the maximum size is reached.
if (allow_growth && m_current_size < m_maximum_size)
{
size_t new_size = std::min(std::max(num_bytes, m_current_size * 2), m_maximum_size);
if (ResizeBuffer(new_size))
{
// Allocating from the start of the buffer.
m_last_allocation_size = num_bytes;
return true;
}
}
// Can we find a fence to wait on that will give us enough memory?
if (allow_reuse && WaitForClearSpace(required_bytes))
{
_assert_(m_current_offset == m_current_gpu_position ||
(m_current_offset + required_bytes) < m_current_gpu_position);
m_current_offset = Util::AlignBufferOffset(m_current_offset, alignment);
m_last_allocation_size = num_bytes;
return true;
}
// If we are not allowed to execute in our current state (e.g. in the middle of a render pass),
// as a last resort, reallocate the buffer. This will incur a performance hit and is not
// encouraged.
if (reallocate_if_full && ResizeBuffer(m_current_size))
{
m_last_allocation_size = num_bytes;
return true;
}
// We tried everything we could, and still couldn't get anything. If we're not at a point
// where the state is known and can be resumed, this is probably a fatal error.
return false;
}
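// NOTE: a minimal sketch of the intended calling pattern, not part of this backend;
// `data`/`size` and the 256-byte alignment below are hypothetical:
//
//   if (buffer->ReserveMemory(size, 256))
//   {
//     std::memcpy(buffer->GetCurrentHostPointer(), data, size);
//     buffer->CommitMemory(size);
//   }
//
// GetCurrentHostPointer()/GetCurrentOffset() are only meaningful between a successful
// ReserveMemory() call and the matching CommitMemory().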
void StreamBuffer::CommitMemory(size_t final_num_bytes)
{
_assert_((m_current_offset + final_num_bytes) <= m_current_size);
_assert_(final_num_bytes <= m_last_allocation_size);
// For non-coherent mappings, flush the memory range
if (!m_coherent_mapping)
{
VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory,
m_current_offset, final_num_bytes};
vkFlushMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
}
m_current_offset += final_num_bytes;
}
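// NOTE: strictly speaking, the spec requires VkMappedMemoryRange offset/size to be
// multiples of VkPhysicalDeviceLimits::nonCoherentAtomSize (or for the range to reach the
// end of the allocation). A spec-clean variant, assuming a power-of-two atom size and a
// hypothetical GetDeviceLimits() accessor on the context:
//
//   const VkDeviceSize atom = g_vulkan_context->GetDeviceLimits().nonCoherentAtomSize;
//   const VkDeviceSize begin = m_current_offset & ~(atom - 1);
//   const VkDeviceSize end =
//       std::min<VkDeviceSize>((m_current_offset + final_num_bytes + atom - 1) & ~(atom - 1),
//                              m_current_size);
//   VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr, m_memory,
//                                begin, end - begin};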
void StreamBuffer::OnCommandBufferQueued(VkCommandBuffer command_buffer, VkFence fence)
{
// Don't create a tracking entry if the GPU is caught up with the buffer.
if (m_current_offset == m_current_gpu_position)
return;
// Has the offset changed since the last fence?
if (!m_tracked_fences.empty() && m_tracked_fences.back().second == m_current_offset)
{
// No need to track the new fence, the old one is sufficient.
return;
}
m_tracked_fences.emplace_back(fence, m_current_offset);
}
void StreamBuffer::OnCommandBufferExecuted(VkFence fence)
{
// Locate the entry for this fence (if any, we may have been forced to wait already)
auto iter = std::find_if(m_tracked_fences.begin(), m_tracked_fences.end(),
[fence](const auto& it) { return it.first == fence; });
if (iter != m_tracked_fences.end())
{
// Update the GPU position, and remove any fences before this fence (since
// it is implied that they have been signaled as well, though the callback
// should have removed them already).
m_current_gpu_position = iter->second;
m_tracked_fences.erase(m_tracked_fences.begin(), ++iter);
}
}
bool StreamBuffer::WaitForClearSpace(size_t num_bytes)
{
size_t new_offset = 0;
size_t new_gpu_position = 0;
auto iter = m_tracked_fences.begin();
for (; iter != m_tracked_fences.end(); iter++)
{
// Would this fence bring us in line with the GPU?
size_t gpu_position = iter->second;
if (gpu_position == m_current_offset)
{
// Start at the start of the buffer again.
new_offset = 0;
new_gpu_position = 0;
break;
}
// We can wrap around to the start, behind the GPU, if there is enough space.
// We use > here because otherwise we'd end up lining up with the GPU, and then the
// allocator would assume that the GPU has consumed what we just wrote.
if (m_current_offset >= m_current_gpu_position)
{
// Wrap around to the start (behind the GPU) if there is sufficient space.
if (gpu_position > num_bytes)
{
new_offset = 0;
new_gpu_position = gpu_position;
break;
}
}
else
{
// We're currently allocating behind the GPU. Therefore, if this fence is behind us,
// and it's the last fence in the list (no data has been written after it), we can
// move back to allocating in front of the GPU.
if (gpu_position < m_current_offset)
{
if (std::none_of(iter, m_tracked_fences.end(),
[gpu_position](const auto& it) { return it.second > gpu_position; }))
{
// Wait for this fence to complete, then allocate directly after it.
new_offset = gpu_position;
new_gpu_position = gpu_position;
break;
}
}
}
}
// Did any fences satisfy this condition?
if (iter == m_tracked_fences.end())
return false;
// Wait until this fence is signaled.
VkResult res =
vkWaitForFences(g_vulkan_context->GetDevice(), 1, &iter->first, VK_TRUE, UINT64_MAX);
if (res != VK_SUCCESS)
LOG_VULKAN_ERROR(res, "vkWaitForFences failed: ");
// Update GPU position, and remove all fences up to (and including) this fence.
m_current_offset = new_offset;
m_current_gpu_position = new_gpu_position;
m_tracked_fences.erase(m_tracked_fences.begin(), ++iter);
return true;
}
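// Worked example with illustrative numbers: take a 1024-byte buffer with
// m_current_offset = 900, m_current_gpu_position = 100, and fences recorded at offsets
// {300, 700}. A request totaling 256 bytes (including alignment padding) fits neither at
// the end (124 bytes left) nor at the start (only 100 bytes ahead of the GPU). The first
// fence satisfies gpu_position (300) > num_bytes (256), so we wait on it, wrap to offset
// 0, and resume with the GPU known to be at offset 300.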
} // namespace Vulkan


@ -0,0 +1,59 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <deque>
#include <memory>
#include <utility>
#include "VideoBackends/Vulkan/Constants.h"
namespace Vulkan
{
class StreamBuffer
{
public:
StreamBuffer(VkBufferUsageFlags usage, size_t max_size);
~StreamBuffer();
VkBuffer GetBuffer() const { return m_buffer; }
VkDeviceMemory GetDeviceMemory() const { return m_memory; }
u8* GetHostPointer() const { return m_host_pointer; }
u8* GetCurrentHostPointer() const { return m_host_pointer + m_current_offset; }
size_t GetCurrentSize() const { return m_current_size; }
size_t GetCurrentOffset() const { return m_current_offset; }
bool ReserveMemory(size_t num_bytes, size_t alignment, bool allow_reuse = true,
bool allow_growth = true, bool reallocate_if_full = false);
void CommitMemory(size_t final_num_bytes);
static std::unique_ptr<StreamBuffer> Create(VkBufferUsageFlags usage, size_t initial_size,
size_t max_size);
private:
bool ResizeBuffer(size_t size);
void OnCommandBufferQueued(VkCommandBuffer command_buffer, VkFence fence);
void OnCommandBufferExecuted(VkFence fence);
// Waits for as many fences as needed to allocate num_bytes bytes from the buffer.
bool WaitForClearSpace(size_t num_bytes);
VkBufferUsageFlags m_usage;
size_t m_current_size = 0;
size_t m_maximum_size;
size_t m_current_offset = 0;
size_t m_current_gpu_position = 0;
size_t m_last_allocation_size = 0;
VkBuffer m_buffer = VK_NULL_HANDLE;
VkDeviceMemory m_memory = VK_NULL_HANDLE;
u8* m_host_pointer = nullptr;
// List of fences and the corresponding positions in the buffer
std::deque<std::pair<VkFence, size_t>> m_tracked_fences;
bool m_coherent_mapping = false;
};
} // namespace Vulkan


@ -0,0 +1,496 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <algorithm>
#include <cstdint>
#include "Common/Assert.h"
#include "Common/CommonFuncs.h"
#include "Common/Logging/Log.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/SwapChain.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
#if defined(VK_USE_PLATFORM_XLIB_KHR)
#include <X11/Xlib.h>
#elif defined(VK_USE_PLATFORM_XCB_KHR)
#include <X11/Xlib-xcb.h>
#include <X11/Xlib.h>
#endif
namespace Vulkan
{
SwapChain::SwapChain(void* native_handle, VkSurfaceKHR surface)
: m_native_handle(native_handle), m_surface(surface)
{
}
SwapChain::~SwapChain()
{
DestroySwapChainImages();
DestroySwapChain();
DestroyRenderPass();
DestroySurface();
}
VkSurfaceKHR SwapChain::CreateVulkanSurface(VkInstance instance, void* hwnd)
{
#if defined(VK_USE_PLATFORM_WIN32_KHR)
VkWin32SurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkWin32SurfaceCreateFlagsKHR flags
nullptr, // HINSTANCE hinstance
reinterpret_cast<HWND>(hwnd) // HWND hwnd
};
VkSurfaceKHR surface;
VkResult res = vkCreateWin32SurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateWin32SurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
#elif defined(VK_USE_PLATFORM_XLIB_KHR)
// Assuming the display handles are compatible, or shared. This matches what we do in the
// GL backend, but it's not ideal.
Display* display = XOpenDisplay(nullptr);
VkXlibSurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkXlibSurfaceCreateFlagsKHR flags
display, // Display* dpy
reinterpret_cast<Window>(hwnd) // Window window
};
VkSurfaceKHR surface;
VkResult res = vkCreateXlibSurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateXlibSurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
#elif defined(VK_USE_PLATFORM_XCB_KHR)
// The frontend currently hands us an Xlib window handle, so we open our own display and
// fetch its XCB connection here. If the UI ever moves to native xcb, it should pass the
// connection (or display) handle through as well.
Display* display = XOpenDisplay(nullptr);
xcb_connection_t* connection = XGetXCBConnection(display);
VkXcbSurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkXcbSurfaceCreateFlagsKHR flags
connection, // xcb_connection_t* connection
static_cast<xcb_window_t>(reinterpret_cast<uintptr_t>(hwnd)) // xcb_window_t window
};
VkSurfaceKHR surface;
VkResult res = vkCreateXcbSurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateXcbSurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
#elif defined(VK_USE_PLATFORM_ANDROID_KHR)
VkAndroidSurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkAndroidSurfaceCreateFlagsKHR flags
reinterpret_cast<ANativeWindow*>(hwnd) // ANativeWindow* window
};
VkSurfaceKHR surface;
VkResult res = vkCreateAndroidSurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateAndroidSurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
#else
return VK_NULL_HANDLE;
#endif
}
std::unique_ptr<SwapChain> SwapChain::Create(void* native_handle, VkSurfaceKHR surface)
{
std::unique_ptr<SwapChain> swap_chain = std::make_unique<SwapChain>(native_handle, surface);
if (!swap_chain->CreateSwapChain() || !swap_chain->CreateRenderPass() ||
!swap_chain->SetupSwapChainImages())
{
return nullptr;
}
return swap_chain;
}
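// NOTE: a minimal creation sketch; `wnd` is a hypothetical native window handle supplied
// by the frontend:
//
//   VkSurfaceKHR surface =
//       SwapChain::CreateVulkanSurface(g_vulkan_context->GetVulkanInstance(), wnd);
//   std::unique_ptr<SwapChain> swap_chain =
//       (surface != VK_NULL_HANDLE) ? SwapChain::Create(wnd, surface) : nullptr;
//
// On failure, the partially constructed SwapChain's destructor destroys the surface as
// well, so the caller must not reuse it.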
bool SwapChain::SelectSurfaceFormat()
{
u32 format_count;
VkResult res = vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(),
m_surface, &format_count, nullptr);
if (res != VK_SUCCESS || format_count == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceFormatsKHR failed: ");
return false;
}
std::vector<VkSurfaceFormatKHR> surface_formats(format_count);
res = vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), m_surface,
&format_count, surface_formats.data());
_assert_(res == VK_SUCCESS);
// If there is a single undefined surface format, the device doesn't care, so we'll just use RGBA
if (surface_formats[0].format == VK_FORMAT_UNDEFINED)
{
m_surface_format.format = VK_FORMAT_R8G8B8A8_UNORM;
m_surface_format.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
return true;
}
// Otherwise, use the first surface format the device reports as preferred.
// Some drivers (e.g. Intel Mesa) return an SRGB format here, which would apply gamma
// correction when presenting to the screen. We don't want that, so swap in the
// equivalent linear format when this happens.
m_surface_format.format = Util::GetLinearFormat(surface_formats[0].format);
m_surface_format.colorSpace = surface_formats[0].colorSpace;
return true;
}
bool SwapChain::SelectPresentMode()
{
VkResult res;
u32 mode_count;
res = vkGetPhysicalDeviceSurfacePresentModesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface,
&mode_count, nullptr);
if (res != VK_SUCCESS || mode_count == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceFormatsKHR failed: ");
return false;
}
std::vector<VkPresentModeKHR> present_modes(mode_count);
res = vkGetPhysicalDeviceSurfacePresentModesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface,
&mode_count, present_modes.data());
_assert_(res == VK_SUCCESS);
// Returns true if the given present mode is supported by the surface.
auto CheckForMode = [&present_modes](VkPresentModeKHR check_mode) {
auto it = std::find_if(present_modes.begin(), present_modes.end(),
[check_mode](VkPresentModeKHR mode) { return check_mode == mode; });
return it != present_modes.end();
};
// If vsync is enabled, prefer VK_PRESENT_MODE_FIFO_KHR.
if (g_ActiveConfig.IsVSync())
{
// Try for relaxed vsync first, since it's likely the VI won't line up with
// the refresh rate of the system exactly, so tearing once is better than
// waiting for the next vblank.
if (CheckForMode(VK_PRESENT_MODE_FIFO_RELAXED_KHR))
{
m_present_mode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
return true;
}
// Fall back to strict vsync.
if (CheckForMode(VK_PRESENT_MODE_FIFO_KHR))
{
WARN_LOG(VIDEO, "Vulkan: FIFO_RELAXED not available, falling back to FIFO.");
m_present_mode = VK_PRESENT_MODE_FIFO_KHR;
return true;
}
}
// Prefer screen-tearing, if possible, for lowest latency.
if (CheckForMode(VK_PRESENT_MODE_IMMEDIATE_KHR))
{
m_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
return true;
}
// Use optimized-vsync above vsync.
if (CheckForMode(VK_PRESENT_MODE_MAILBOX_KHR))
{
m_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
return true;
}
// Fall back to whatever is available.
m_present_mode = present_modes[0];
return true;
}
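// Summary of the priority order above: vsync on -> FIFO_RELAXED, then FIFO; otherwise
// IMMEDIATE (lowest latency, may tear), then MAILBOX, then whatever the driver lists
// first.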
bool SwapChain::CreateRenderPass()
{
// render pass for rendering to the swap chain
VkAttachmentDescription present_render_pass_attachments[] = {
{0, m_surface_format.format, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_CLEAR,
VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}};
VkAttachmentReference present_render_pass_color_attachment_references[] = {
{0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}};
VkSubpassDescription present_render_pass_subpass_descriptions[] = {
{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1,
present_render_pass_color_attachment_references, nullptr, nullptr, 0, nullptr}};
VkRenderPassCreateInfo present_render_pass_info = {
VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
nullptr,
0,
static_cast<u32>(ArraySize(present_render_pass_attachments)),
present_render_pass_attachments,
static_cast<u32>(ArraySize(present_render_pass_subpass_descriptions)),
present_render_pass_subpass_descriptions,
0,
nullptr};
VkResult res = vkCreateRenderPass(g_vulkan_context->GetDevice(), &present_render_pass_info,
nullptr, &m_render_pass);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateRenderPass (present) failed: ");
return false;
}
return true;
}
void SwapChain::DestroyRenderPass()
{
if (m_render_pass == VK_NULL_HANDLE)
return;
g_command_buffer_mgr->DeferResourceDestruction(m_render_pass);
m_render_pass = VK_NULL_HANDLE;
}
bool SwapChain::CreateSwapChain()
{
// Look up surface properties to determine image count and dimensions
VkSurfaceCapabilitiesKHR surface_capabilities;
VkResult res = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(g_vulkan_context->GetPhysicalDevice(),
m_surface, &surface_capabilities);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR failed: ");
return false;
}
// Select swap chain format and present mode
if (!SelectSurfaceFormat() || !SelectPresentMode())
return false;
// Select number of images in swap chain. We prefer one buffer in the background to work on.
// Note: a maxImageCount of zero means there is no limit on the number of images.
uint32_t image_count = surface_capabilities.minImageCount + 1;
if (surface_capabilities.maxImageCount > 0)
image_count = std::min(image_count, surface_capabilities.maxImageCount);
// Determine the dimensions of the swap chain. A currentExtent of 0xFFFFFFFF means the
// surface size is determined by the extent we specify here.
VkExtent2D size = surface_capabilities.currentExtent;
if (size.width == UINT32_MAX)
{
size.width = std::min(std::max(surface_capabilities.minImageExtent.width, 640u),
surface_capabilities.maxImageExtent.width);
size.height = std::min(std::max(surface_capabilities.minImageExtent.height, 480u),
surface_capabilities.maxImageExtent.height);
}
// Prefer identity transform if possible
VkSurfaceTransformFlagBitsKHR transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
if (!(surface_capabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR))
transform = surface_capabilities.currentTransform;
// Select swap chain flags, we only need a colour attachment
VkImageUsageFlags image_usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
if (!(surface_capabilities.supportedUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT))
{
ERROR_LOG(VIDEO, "Vulkan: Swap chain does not support usage as color attachment");
return false;
}
// Store the old/current swap chain when recreating for resize
VkSwapchainKHR old_swap_chain = m_swap_chain;
// Now we can actually create the swap chain
// TODO: Handle case where the present queue is not the graphics queue.
VkSwapchainCreateInfoKHR swap_chain_info = {VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
nullptr,
0,
m_surface,
image_count,
m_surface_format.format,
m_surface_format.colorSpace,
size,
1,
image_usage,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
transform,
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
m_present_mode,
VK_TRUE,
old_swap_chain};
res =
vkCreateSwapchainKHR(g_vulkan_context->GetDevice(), &swap_chain_info, nullptr, &m_swap_chain);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSwapchainKHR failed: ");
return false;
}
// Now destroy the old swap chain, since it's been recreated.
// We can do this immediately since all work should have been completed before calling resize.
if (old_swap_chain != VK_NULL_HANDLE)
vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), old_swap_chain, nullptr);
m_width = size.width;
m_height = size.height;
return true;
}
bool SwapChain::SetupSwapChainImages()
{
_assert_(m_swap_chain_images.empty());
uint32_t image_count;
VkResult res =
vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetSwapchainImagesKHR failed: ");
return false;
}
std::vector<VkImage> images(image_count);
res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count,
images.data());
_assert_(res == VK_SUCCESS);
m_swap_chain_images.reserve(image_count);
for (uint32_t i = 0; i < image_count; i++)
{
SwapChainImage image;
image.image = images[i];
// Create texture object, which creates a view of the backbuffer
image.texture = Texture2D::CreateFromExistingImage(
m_width, m_height, 1, 1, m_surface_format.format, VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_VIEW_TYPE_2D, image.image);
VkImageView view = image.texture->GetView();
VkFramebufferCreateInfo framebuffer_info = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
nullptr,
0,
m_render_pass,
1,
&view,
m_width,
m_height,
1};
res = vkCreateFramebuffer(g_vulkan_context->GetDevice(), &framebuffer_info, nullptr,
&image.framebuffer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateFramebuffer failed: ");
return false;
}
m_swap_chain_images.emplace_back(std::move(image));
}
return true;
}
void SwapChain::DestroySwapChainImages()
{
for (const auto& it : m_swap_chain_images)
{
// Images themselves are cleaned up by the swap chain object
vkDestroyFramebuffer(g_vulkan_context->GetDevice(), it.framebuffer, nullptr);
}
m_swap_chain_images.clear();
}
void SwapChain::DestroySwapChain()
{
if (m_swap_chain == VK_NULL_HANDLE)
return;
vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), m_swap_chain, nullptr);
m_swap_chain = VK_NULL_HANDLE;
}
VkResult SwapChain::AcquireNextImage(VkSemaphore available_semaphore)
{
VkResult res =
vkAcquireNextImageKHR(g_vulkan_context->GetDevice(), m_swap_chain, UINT64_MAX,
available_semaphore, VK_NULL_HANDLE, &m_current_swap_chain_image_index);
if (res != VK_SUCCESS && res != VK_ERROR_OUT_OF_DATE_KHR && res != VK_SUBOPTIMAL_KHR)
LOG_VULKAN_ERROR(res, "vkAcquireNextImageKHR failed: ");
return res;
}
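// NOTE: VK_ERROR_OUT_OF_DATE_KHR and VK_SUBOPTIMAL_KHR are deliberately not treated as
// errors here; the expected reaction is for the caller to call ResizeSwapChain() (or
// RecreateSurface() when the window handle changed) and then acquire again.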
bool SwapChain::ResizeSwapChain()
{
if (!CreateSwapChain())
return false;
DestroySwapChainImages();
if (!SetupSwapChainImages())
{
PanicAlert("Failed to re-configure swap chain images, this is fatal (for now)");
return false;
}
return true;
}
bool SwapChain::RecreateSurface(void* native_handle)
{
// Destroy the old swap chain, images, and surface.
DestroyRenderPass();
DestroySwapChainImages();
DestroySwapChain();
DestroySurface();
// Re-create the surface with the new native handle
m_native_handle = native_handle;
m_surface = CreateVulkanSurface(g_vulkan_context->GetVulkanInstance(), native_handle);
if (m_surface == VK_NULL_HANDLE)
return false;
// Finally re-create the swap chain. The render pass must be created before the swap
// chain images are set up, since their framebuffers reference it.
if (!CreateSwapChain() || !CreateRenderPass() || !SetupSwapChainImages())
return false;
return true;
}
void SwapChain::DestroySurface()
{
vkDestroySurfaceKHR(g_vulkan_context->GetVulkanInstance(), m_surface, nullptr);
m_surface = VK_NULL_HANDLE;
}
} // namespace Vulkan


@ -0,0 +1,93 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include "VideoBackends/Vulkan/Constants.h"
#include "VideoBackends/Vulkan/Texture2D.h"
namespace Vulkan
{
class CommandBufferManager;
class ObjectCache;
class SwapChain
{
public:
SwapChain(void* native_handle, VkSurfaceKHR surface);
~SwapChain();
// Creates a vulkan-renderable surface for the specified window handle.
static VkSurfaceKHR CreateVulkanSurface(VkInstance instance, void* hwnd);
// Create a new swap chain from a pre-existing surface.
static std::unique_ptr<SwapChain> Create(void* native_handle, VkSurfaceKHR surface);
void* GetNativeHandle() const { return m_native_handle; }
VkSurfaceKHR GetSurface() const { return m_surface; }
VkSurfaceFormatKHR GetSurfaceFormat() const { return m_surface_format; }
VkSwapchainKHR GetSwapChain() const { return m_swap_chain; }
VkRenderPass GetRenderPass() const { return m_render_pass; }
u32 GetWidth() const { return m_width; }
u32 GetHeight() const { return m_height; }
u32 GetCurrentImageIndex() const { return m_current_swap_chain_image_index; }
VkImage GetCurrentImage() const
{
return m_swap_chain_images[m_current_swap_chain_image_index].image;
}
Texture2D* GetCurrentTexture() const
{
return m_swap_chain_images[m_current_swap_chain_image_index].texture.get();
}
VkFramebuffer GetCurrentFramebuffer() const
{
return m_swap_chain_images[m_current_swap_chain_image_index].framebuffer;
}
VkResult AcquireNextImage(VkSemaphore available_semaphore);
bool RecreateSurface(void* native_handle);
bool ResizeSwapChain();
private:
bool SelectSurfaceFormat();
bool SelectPresentMode();
bool CreateSwapChain();
void DestroySwapChain();
bool CreateRenderPass();
void DestroyRenderPass();
bool SetupSwapChainImages();
void DestroySwapChainImages();
void DestroySurface();
struct SwapChainImage
{
VkImage image;
std::unique_ptr<Texture2D> texture;
VkFramebuffer framebuffer;
};
void* m_native_handle = nullptr;
VkSurfaceKHR m_surface = VK_NULL_HANDLE;
VkSurfaceFormatKHR m_surface_format = {};
VkPresentModeKHR m_present_mode = VK_PRESENT_MODE_RANGE_SIZE_KHR;
VkSwapchainKHR m_swap_chain = VK_NULL_HANDLE;
std::vector<SwapChainImage> m_swap_chain_images;
u32 m_current_swap_chain_image_index = 0;
VkRenderPass m_render_pass = VK_NULL_HANDLE;
u32 m_width = 0;
u32 m_height = 0;
};
} // namespace Vulkan


@ -0,0 +1,280 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <algorithm>
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/Texture2D.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
namespace Vulkan
{
Texture2D::Texture2D(u32 width, u32 height, u32 levels, u32 layers, VkFormat format,
VkSampleCountFlagBits samples, VkImageViewType view_type, VkImage image,
VkDeviceMemory device_memory, VkImageView view)
: m_width(width), m_height(height), m_levels(levels), m_layers(layers), m_format(format),
m_samples(samples), m_view_type(view_type), m_image(image), m_device_memory(device_memory),
m_view(view)
{
}
Texture2D::~Texture2D()
{
g_command_buffer_mgr->DeferResourceDestruction(m_view);
// If we don't have device memory allocated, the image is not owned by us (e.g. swapchain)
if (m_device_memory != VK_NULL_HANDLE)
{
g_command_buffer_mgr->DeferResourceDestruction(m_image);
g_command_buffer_mgr->DeferResourceDestruction(m_device_memory);
}
}
std::unique_ptr<Texture2D> Texture2D::Create(u32 width, u32 height, u32 levels, u32 layers,
VkFormat format, VkSampleCountFlagBits samples,
VkImageViewType view_type, VkImageTiling tiling,
VkImageUsageFlags usage)
{
VkImageCreateInfo image_info = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
0,
VK_IMAGE_TYPE_2D,
format,
{width, height, 1},
levels,
layers,
samples,
tiling,
usage,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VkImage image = VK_NULL_HANDLE;
VkResult res = vkCreateImage(g_vulkan_context->GetDevice(), &image_info, nullptr, &image);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImage failed: ");
return nullptr;
}
// Allocate memory to back this texture, we want device local memory in this case
VkMemoryRequirements memory_requirements;
vkGetImageMemoryRequirements(g_vulkan_context->GetDevice(), image, &memory_requirements);
VkMemoryAllocateInfo memory_info = {
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, nullptr, memory_requirements.size,
g_vulkan_context->GetMemoryType(memory_requirements.memoryTypeBits,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)};
VkDeviceMemory device_memory;
res = vkAllocateMemory(g_vulkan_context->GetDevice(), &memory_info, nullptr, &device_memory);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkAllocateMemory failed: ");
vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr);
return nullptr;
}
res = vkBindImageMemory(g_vulkan_context->GetDevice(), image, device_memory, 0);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkBindImageMemory failed: ");
vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), device_memory, nullptr);
return nullptr;
}
VkImageViewCreateInfo view_info = {
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
nullptr,
0,
image,
view_type,
format,
{VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY},
{Util::IsDepthFormat(format) ? static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_DEPTH_BIT) :
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT),
0, levels, 0, layers}};
VkImageView view = VK_NULL_HANDLE;
res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImageView failed: ");
vkDestroyImage(g_vulkan_context->GetDevice(), image, nullptr);
vkFreeMemory(g_vulkan_context->GetDevice(), device_memory, nullptr);
return nullptr;
}
return std::make_unique<Texture2D>(width, height, levels, layers, format, samples, view_type,
image, device_memory, view);
}
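// NOTE: a creation sketch with illustrative parameters, e.g. a sampleable render target
// matching the native EFB dimensions:
//
//   auto texture = Texture2D::Create(640, 528, 1, 1, VK_FORMAT_R8G8B8A8_UNORM,
//                                    VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_VIEW_TYPE_2D_ARRAY,
//                                    VK_IMAGE_TILING_OPTIMAL,
//                                    VK_IMAGE_USAGE_SAMPLED_BIT |
//                                        VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
//   if (!texture)
//     return false;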
std::unique_ptr<Texture2D> Texture2D::CreateFromExistingImage(u32 width, u32 height, u32 levels,
u32 layers, VkFormat format,
VkSampleCountFlagBits samples,
VkImageViewType view_type,
VkImage existing_image)
{
// Only need to create the image view, this is mainly for swap chains.
VkImageViewCreateInfo view_info = {
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
nullptr,
0,
existing_image,
view_type,
format,
{VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY},
{Util::IsDepthFormat(format) ? static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_DEPTH_BIT) :
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT),
0, levels, 0, layers}};
VkImageView view = VK_NULL_HANDLE;
VkResult res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImageView failed: ");
return nullptr;
}
return std::make_unique<Texture2D>(width, height, levels, layers, format, samples, view_type,
existing_image, VK_NULL_HANDLE, view);
}
void Texture2D::OverrideImageLayout(VkImageLayout new_layout)
{
m_layout = new_layout;
}
void Texture2D::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout)
{
if (m_layout == new_layout)
return;
VkImageMemoryBarrier barrier = {
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
0, // VkAccessFlags srcAccessMask
0, // VkAccessFlags dstAccessMask
m_layout, // VkImageLayout oldLayout
new_layout, // VkImageLayout newLayout
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
m_image, // VkImage image
{static_cast<VkImageAspectFlags>(Util::IsDepthFormat(m_format) ? VK_IMAGE_ASPECT_DEPTH_BIT :
VK_IMAGE_ASPECT_COLOR_BIT),
0, m_levels, 0, m_layers} // VkImageSubresourceRange subresourceRange
};
// srcStageMask -> Stages that must complete before the barrier executes.
// dstStageMask -> Stages that must wait for the barrier to complete before beginning.
VkPipelineStageFlags srcStageMask, dstStageMask;
switch (m_layout)
{
case VK_IMAGE_LAYOUT_UNDEFINED:
// Layout undefined therefore contents undefined, and we don't care what happens to it.
barrier.srcAccessMask = 0;
srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
case VK_IMAGE_LAYOUT_PREINITIALIZED:
// Image has been pre-initialized by the host, so ensure all writes have completed.
barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_HOST_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
// Image was being used as a color attachment, so ensure all writes have completed.
barrier.srcAccessMask =
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
// Image was being used as a depth-stencil attachment, so ensure all writes have completed.
// Depth/stencil writes happen in the fragment test stages, not the color output stage.
barrier.srcAccessMask =
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
srcStageMask =
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// Image was being used as a shader resource, make sure all reads have finished.
barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
// Image was being used as a copy source, ensure all reads have finished.
barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
// Image was being used as a copy destination, ensure all writes have finished.
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
default:
srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
}
switch (new_layout)
{
case VK_IMAGE_LAYOUT_UNDEFINED:
barrier.dstAccessMask = 0;
dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
barrier.dstAccessMask =
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
barrier.dstAccessMask =
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
dstStageMask =
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// The fragment shader is the only stage that samples textures in this backend.
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
default:
dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
break;
}
vkCmdPipelineBarrier(command_buffer, srcStageMask, dstStageMask, 0, 0, nullptr, 0, nullptr, 1,
&barrier);
m_layout = new_layout;
}
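// NOTE: a typical upload sequence using this helper, as a sketch; `cmdbuf` and the copy
// call are assumed to exist elsewhere:
//
//   texture->TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
//   // ... vkCmdCopyBufferToImage(cmdbuf, ...) ...
//   texture->TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
//
// When a render pass changes the layout itself, call OverrideImageLayout() afterwards so
// the cached m_layout stays in sync with the image's actual state.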
} // namespace Vulkan


@ -0,0 +1,67 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include "VideoBackends/Vulkan/Constants.h"
namespace Vulkan
{
class CommandBufferManager;
class ObjectCache;
class Texture2D
{
public:
Texture2D(u32 width, u32 height, u32 levels, u32 layers, VkFormat format,
VkSampleCountFlagBits samples, VkImageViewType view_type, VkImage image,
VkDeviceMemory device_memory, VkImageView view);
~Texture2D();
static std::unique_ptr<Texture2D> Create(u32 width, u32 height, u32 levels, u32 layers,
VkFormat format, VkSampleCountFlagBits samples,
VkImageViewType view_type, VkImageTiling tiling,
VkImageUsageFlags usage);
static std::unique_ptr<Texture2D> CreateFromExistingImage(u32 width, u32 height, u32 levels,
u32 layers, VkFormat format,
VkSampleCountFlagBits samples,
VkImageViewType view_type,
VkImage existing_image);
u32 GetWidth() const { return m_width; }
u32 GetHeight() const { return m_height; }
u32 GetLevels() const { return m_levels; }
u32 GetLayers() const { return m_layers; }
VkFormat GetFormat() const { return m_format; }
VkSampleCountFlagBits GetSamples() const { return m_samples; }
VkImageLayout GetLayout() const { return m_layout; }
VkImageViewType GetViewType() const { return m_view_type; }
VkImage GetImage() const { return m_image; }
VkDeviceMemory GetDeviceMemory() const { return m_device_memory; }
VkImageView GetView() const { return m_view; }
// Used when the render pass is changing the image layout, or to force it to
// VK_IMAGE_LAYOUT_UNDEFINED, if the existing contents of the image is
// irrelevant and will not be loaded.
void OverrideImageLayout(VkImageLayout new_layout);
void TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout);
private:
u32 m_width;
u32 m_height;
u32 m_levels;
u32 m_layers;
VkFormat m_format;
VkSampleCountFlagBits m_samples;
VkImageViewType m_view_type;
VkImageLayout m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImage m_image;
VkDeviceMemory m_device_memory;
VkImageView m_view;
};
} // namespace Vulkan


@ -0,0 +1,740 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <algorithm>
#include <cstring>
#include <vector>
#include "Common/Assert.h"
#include "Common/CommonFuncs.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/FramebufferManager.h"
#include "VideoBackends/Vulkan/ObjectCache.h"
#include "VideoBackends/Vulkan/PaletteTextureConverter.h"
#include "VideoBackends/Vulkan/Renderer.h"
#include "VideoBackends/Vulkan/StagingTexture2D.h"
#include "VideoBackends/Vulkan/StateTracker.h"
#include "VideoBackends/Vulkan/StreamBuffer.h"
#include "VideoBackends/Vulkan/Texture2D.h"
#include "VideoBackends/Vulkan/TextureCache.h"
#include "VideoBackends/Vulkan/TextureEncoder.h"
#include "VideoBackends/Vulkan/Util.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
#include "VideoCommon/ImageWrite.h"
namespace Vulkan
{
TextureCache::TextureCache()
{
}
TextureCache::~TextureCache()
{
if (m_initialize_render_pass != VK_NULL_HANDLE)
vkDestroyRenderPass(g_vulkan_context->GetDevice(), m_initialize_render_pass, nullptr);
if (m_update_render_pass != VK_NULL_HANDLE)
vkDestroyRenderPass(g_vulkan_context->GetDevice(), m_update_render_pass, nullptr);
}
bool TextureCache::Initialize(StateTracker* state_tracker)
{
m_state_tracker = state_tracker;
m_texture_upload_buffer =
StreamBuffer::Create(VK_BUFFER_USAGE_TRANSFER_SRC_BIT, INITIAL_TEXTURE_UPLOAD_BUFFER_SIZE,
MAXIMUM_TEXTURE_UPLOAD_BUFFER_SIZE);
if (!m_texture_upload_buffer)
{
PanicAlert("Failed to create texture upload buffer");
return false;
}
if (!CreateRenderPasses())
{
PanicAlert("Failed to create copy render pass");
return false;
}
m_texture_encoder = std::make_unique<TextureEncoder>();
if (!m_texture_encoder->Initialize())
{
PanicAlert("Failed to initialize texture encoder.");
return false;
}
m_palette_texture_converter = std::make_unique<PaletteTextureConverter>();
if (!m_palette_texture_converter->Initialize())
{
PanicAlert("Failed to initialize palette texture converter");
return false;
}
if (!CompileShaders())
{
PanicAlert("Failed to compile one or more shaders");
return false;
}
return true;
}
void TextureCache::ConvertTexture(TCacheEntryBase* base_entry, TCacheEntryBase* base_unconverted,
void* palette, TlutFormat format)
{
TCacheEntry* entry = static_cast<TCacheEntry*>(base_entry);
TCacheEntry* unconverted = static_cast<TCacheEntry*>(base_unconverted);
_assert_(entry->config.rendertarget);
m_palette_texture_converter->ConvertTexture(
m_state_tracker, GetRenderPassForTextureUpdate(entry->GetTexture()), entry->GetFramebuffer(),
unconverted->GetTexture(), entry->config.width, entry->config.height, palette, format);
// Render pass transitions to SHADER_READ_ONLY.
entry->GetTexture()->OverrideImageLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
void TextureCache::CopyEFB(u8* dst, u32 format, u32 native_width, u32 bytes_per_row,
u32 num_blocks_y, u32 memory_stride, PEControl::PixelFormat src_format,
const EFBRectangle& src_rect, bool is_intensity, bool scale_by_half)
{
// A better way of doing this would be nice.
FramebufferManager* framebuffer_mgr =
static_cast<FramebufferManager*>(g_framebuffer_manager.get());
// Flush EFB pokes first, as they're expected to be included.
framebuffer_mgr->FlushEFBPokes(m_state_tracker);
// MSAA case where we need to resolve first.
// TODO: Do in one pass.
TargetRectangle scaled_src_rect = g_renderer->ConvertEFBRectangle(src_rect);
VkRect2D region = {{scaled_src_rect.left, scaled_src_rect.top},
{static_cast<u32>(scaled_src_rect.GetWidth()),
static_cast<u32>(scaled_src_rect.GetHeight())}};
Texture2D* src_texture = (src_format == PEControl::Z24) ?
framebuffer_mgr->ResolveEFBDepthTexture(m_state_tracker, region) :
framebuffer_mgr->ResolveEFBColorTexture(m_state_tracker, region);
// End render pass before barrier (since we have no self-dependencies)
m_state_tracker->EndRenderPass();
m_state_tracker->SetPendingRebind();
m_state_tracker->InvalidateDescriptorSets();
m_state_tracker->OnReadback();
// Transition to shader resource before reading.
VkImageLayout original_layout = src_texture->GetLayout();
src_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
m_texture_encoder->EncodeTextureToRam(m_state_tracker, src_texture->GetView(), dst, format,
native_width, bytes_per_row, num_blocks_y, memory_stride,
src_format, is_intensity, scale_by_half, src_rect);
// Transition back to original state
src_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(), original_layout);
}
TextureCacheBase::TCacheEntryBase* TextureCache::CreateTexture(const TCacheEntryConfig& config)
{
// Determine image usage, we need to flag as an attachment if it can be used as a rendertarget.
VkImageUsageFlags usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_SAMPLED_BIT;
if (config.rendertarget)
usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
// Allocate texture object
std::unique_ptr<Texture2D> texture = Texture2D::Create(
config.width, config.height, config.levels, config.layers, TEXTURECACHE_TEXTURE_FORMAT,
VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_VIEW_TYPE_2D_ARRAY, VK_IMAGE_TILING_OPTIMAL, usage);
if (!texture)
return nullptr;
// If this is a render target (for efb copies), allocate a framebuffer
VkFramebuffer framebuffer = VK_NULL_HANDLE;
if (config.rendertarget)
{
VkImageView framebuffer_attachments[] = {texture->GetView()};
VkFramebufferCreateInfo framebuffer_info = {
VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
nullptr,
0,
m_initialize_render_pass,
static_cast<u32>(ArraySize(framebuffer_attachments)),
framebuffer_attachments,
texture->GetWidth(),
texture->GetHeight(),
texture->GetLayers()};
VkResult res = vkCreateFramebuffer(g_vulkan_context->GetDevice(), &framebuffer_info, nullptr,
&framebuffer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateFramebuffer failed: ");
return nullptr;
}
// Clear render targets before use to prevent reading uninitialized memory.
VkClearColorValue clear_value = {{0.0f, 0.0f, 0.0f, 1.0f}};
VkImageSubresourceRange clear_range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, config.levels, 0,
config.layers};
texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentInitCommandBuffer(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vkCmdClearColorImage(g_command_buffer_mgr->GetCurrentInitCommandBuffer(), texture->GetImage(),
texture->GetLayout(), &clear_value, 1, &clear_range);
}
return new TCacheEntry(config, this, std::move(texture), framebuffer);
}
bool TextureCache::CreateRenderPasses()
{
static constexpr VkAttachmentDescription initialize_attachment = {
0,
TEXTURECACHE_TEXTURE_FORMAT,
VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
static constexpr VkAttachmentDescription update_attachment = {
0,
TEXTURECACHE_TEXTURE_FORMAT,
VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
static constexpr VkAttachmentReference color_attachment_reference = {
0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
static constexpr VkSubpassDescription subpass_description = {
0, VK_PIPELINE_BIND_POINT_GRAPHICS,
0, nullptr,
1, &color_attachment_reference,
nullptr, nullptr,
0, nullptr};
static constexpr VkSubpassDependency initialize_dependencies[] = {
{VK_SUBPASS_EXTERNAL, 0, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_DEPENDENCY_BY_REGION_BIT},
{0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_ACCESS_SHADER_READ_BIT, VK_DEPENDENCY_BY_REGION_BIT}};
static constexpr VkSubpassDependency update_dependencies[] = {
{VK_SUBPASS_EXTERNAL, 0, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_SHADER_READ_BIT,
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_DEPENDENCY_BY_REGION_BIT},
{0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_ACCESS_SHADER_READ_BIT, VK_DEPENDENCY_BY_REGION_BIT}};
VkRenderPassCreateInfo initialize_info = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
nullptr,
0,
1,
&initialize_attachment,
1,
&subpass_description,
static_cast<u32>(ArraySize(initialize_dependancies)),
initialize_dependancies};
VkRenderPassCreateInfo update_info = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
nullptr,
0,
1,
&update_attachment,
1,
&subpass_description,
static_cast<u32>(ArraySize(update_dependancies)),
update_dependancies};
VkResult res = vkCreateRenderPass(g_vulkan_context->GetDevice(), &initialize_info, nullptr,
&m_initialize_render_pass);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateRenderPass (initialize) failed: ");
return false;
}
res = vkCreateRenderPass(g_vulkan_context->GetDevice(), &update_info, nullptr,
&m_update_render_pass);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateRenderPass (update) failed: ");
return false;
}
return true;
}
VkRenderPass TextureCache::GetRenderPassForTextureUpdate(const Texture2D* texture) const
{
// EFB copies can be re-used as part of the texture pool. If this is the case, we need to insert
// a pipeline barrier to ensure that all reads from the texture expecting the old data have
// completed before overwriting the texture's contents. New textures will be in TRANSFER_DST
// due to the clear after creation.
// These two render passes are compatible, so even though the framebuffer was created with
// the initialize render pass it's still allowed.
if (texture->GetLayout() == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
return m_initialize_render_pass;
else
return m_update_render_pass;
}
TextureCache::TCacheEntry::TCacheEntry(const TCacheEntryConfig& config_, TextureCache* parent,
std::unique_ptr<Texture2D> texture,
VkFramebuffer framebuffer)
: TCacheEntryBase(config_), m_parent(parent), m_texture(std::move(texture)),
m_framebuffer(framebuffer)
{
}
TextureCache::TCacheEntry::~TCacheEntry()
{
// The texture itself is cleaned up automatically; however, we don't want to leave it
// bound to the state tracker.
m_parent->m_state_tracker->UnbindTexture(m_texture->GetView());
if (m_framebuffer != VK_NULL_HANDLE)
g_command_buffer_mgr->DeferResourceDestruction(m_framebuffer);
}
void TextureCache::TCacheEntry::Load(unsigned int width, unsigned int height,
unsigned int expanded_width, unsigned int level)
{
// Can't copy data larger than the texture extents.
width = std::max(1u, std::min(width, m_texture->GetWidth() >> level));
height = std::max(1u, std::min(height, m_texture->GetHeight() >> level));
// We don't care about the existing contents of the texture, so we set the image layout to
// VK_IMAGE_LAYOUT_UNDEFINED here. However, if this texture is being re-used from the texture
// pool, it may still be in use. We assume that it's not, as non-efb-copy textures are only
// returned to the pool when the frame number is different. Furthermore, we're doing this
// on the initialize command buffer, so a texture being re-used mid-frame would have undesirable
// effects regardless.
VkImageMemoryBarrier barrier = {
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
0, // VkAccessFlags srcAccessMask
VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
m_texture->GetImage(), // VkImage image
{VK_IMAGE_ASPECT_COLOR_BIT, level, 1, 0, 1}, // VkImageSubresourceRange subresourceRange
};
vkCmdPipelineBarrier(g_command_buffer_mgr->GetCurrentInitCommandBuffer(),
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0,
nullptr, 0, nullptr, 1, &barrier);
// Does this texture data fit within the streaming buffer?
u32 upload_width = width;
u32 upload_pitch = upload_width * sizeof(u32);
u32 upload_size = upload_pitch * height;
u32 upload_alignment = static_cast<u32>(g_vulkan_context->GetBufferImageGranularity());
u32 source_pitch = expanded_width * 4;
if ((upload_size + upload_alignment) <= STAGING_TEXTURE_UPLOAD_THRESHOLD &&
(upload_size + upload_alignment) <= MAXIMUM_TEXTURE_UPLOAD_BUFFER_SIZE)
{
// Assume tightly packed rows, with no padding as the buffer source.
StreamBuffer* upload_buffer = m_parent->m_texture_upload_buffer.get();
// Allocate memory from the streaming buffer for the texture data.
if (!upload_buffer->ReserveMemory(upload_size, g_vulkan_context->GetBufferImageGranularity()))
{
// Execute the command buffer first.
WARN_LOG(VIDEO, "Executing command list while waiting for space in texture upload buffer");
Util::ExecuteCurrentCommandsAndRestoreState(m_parent->m_state_tracker, false);
// Try allocating again. This may cause a fence wait.
if (!upload_buffer->ReserveMemory(upload_size, g_vulkan_context->GetBufferImageGranularity()))
PanicAlert("Failed to allocate space in texture upload buffer");
}
// Grab buffer pointers
VkBuffer image_upload_buffer = upload_buffer->GetBuffer();
VkDeviceSize image_upload_buffer_offset = upload_buffer->GetCurrentOffset();
u8* image_upload_buffer_pointer = upload_buffer->GetCurrentHostPointer();
// Copy to the buffer using the stride from the subresource layout
const u8* source_ptr = TextureCache::temp;
if (upload_pitch != source_pitch)
{
VkDeviceSize copy_pitch = std::min(source_pitch, upload_pitch);
for (unsigned int row = 0; row < height; row++)
{
memcpy(image_upload_buffer_pointer + row * upload_pitch, source_ptr + row * source_pitch,
copy_pitch);
}
}
else
{
// Can copy the whole thing in one block, the pitch matches
memcpy(image_upload_buffer_pointer, source_ptr, upload_size);
}
// Flush buffer memory if necessary
upload_buffer->CommitMemory(upload_size);
// Copy from the streaming buffer to the actual image.
VkBufferImageCopy image_copy = {
image_upload_buffer_offset, // VkDeviceSize bufferOffset
0, // uint32_t bufferRowLength
0, // uint32_t bufferImageHeight
{VK_IMAGE_ASPECT_COLOR_BIT, level, 0, 1}, // VkImageSubresourceLayers imageSubresource
{0, 0, 0}, // VkOffset3D imageOffset
{width, height, 1} // VkExtent3D imageExtent
};
vkCmdCopyBufferToImage(g_command_buffer_mgr->GetCurrentInitCommandBuffer(), image_upload_buffer,
m_texture->GetImage(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
&image_copy);
}
else
{
// Slow path. The data for the image is too large to fit in the streaming buffer, so we need
// to allocate a temporary texture to store the data in, then copy to the real texture.
std::unique_ptr<StagingTexture2D> staging_texture = StagingTexture2D::Create(
STAGING_BUFFER_TYPE_UPLOAD, width, height, TEXTURECACHE_TEXTURE_FORMAT);
if (!staging_texture || !staging_texture->Map())
{
PanicAlert("Failed to allocate staging texture for large texture upload.");
return;
}
// Copy data to staging texture first, then to the "real" texture.
staging_texture->WriteTexels(0, 0, width, height, TextureCache::temp, source_pitch);
staging_texture->CopyToImage(g_command_buffer_mgr->GetCurrentInitCommandBuffer(),
m_texture->GetImage(), VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, width,
height, level, 0);
}
// Transition to shader read only.
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
vkCmdPipelineBarrier(g_command_buffer_mgr->GetCurrentInitCommandBuffer(),
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0,
nullptr, 0, nullptr, 1, &barrier);
m_texture->OverrideImageLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
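// Worked example with illustrative numbers: a 100x64 level decoded with
// expanded_width = 128 gives upload_pitch = 400 bytes but source_pitch = 512 bytes, so
// the row-by-row path above repacks each row tightly; this matches the bufferRowLength
// of 0 (tightly packed) passed to vkCmdCopyBufferToImage.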
void TextureCache::TCacheEntry::FromRenderTarget(u8* dst, PEControl::PixelFormat src_format,
const EFBRectangle& src_rect, bool scale_by_half,
unsigned int cbufid, const float* colmat)
{
// A better way of doing this would be nice.
FramebufferManager* framebuffer_mgr =
static_cast<FramebufferManager*>(g_framebuffer_manager.get());
TargetRectangle scaled_src_rect = g_renderer->ConvertEFBRectangle(src_rect);
bool is_depth_copy = (src_format == PEControl::Z24);
// Flush EFB pokes first, as they're expected to be included.
framebuffer_mgr->FlushEFBPokes(m_parent->m_state_tracker);
// Has to be flagged as a render target.
_assert_(m_framebuffer != VK_NULL_HANDLE);
// Can't be done in a render pass, since we're doing our own render pass!
StateTracker* state_tracker = m_parent->m_state_tracker;
VkCommandBuffer command_buffer = g_command_buffer_mgr->GetCurrentCommandBuffer();
state_tracker->EndRenderPass();
// Transition EFB to shader resource before binding
VkRect2D region = {{scaled_src_rect.left, scaled_src_rect.top},
{static_cast<u32>(scaled_src_rect.GetWidth()),
static_cast<u32>(scaled_src_rect.GetHeight())}};
Texture2D* src_texture = is_depth_copy ?
framebuffer_mgr->ResolveEFBDepthTexture(state_tracker, region) :
framebuffer_mgr->ResolveEFBColorTexture(state_tracker, region);
VkSampler src_sampler =
scale_by_half ? g_object_cache->GetLinearSampler() : g_object_cache->GetPointSampler();
VkImageLayout original_layout = src_texture->GetLayout();
src_texture->TransitionToLayout(command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
UtilityShaderDraw draw(
command_buffer, g_object_cache->GetPushConstantPipelineLayout(),
m_parent->GetRenderPassForTextureUpdate(m_texture.get()),
g_object_cache->GetPassthroughVertexShader(), g_object_cache->GetPassthroughGeometryShader(),
is_depth_copy ? m_parent->m_efb_depth_to_tex_shader : m_parent->m_efb_color_to_tex_shader);
draw.SetPushConstants(colmat, (is_depth_copy ? sizeof(float) * 20 : sizeof(float) * 28));
draw.SetPSSampler(0, src_texture->GetView(), src_sampler);
VkRect2D dest_region = {{0, 0}, {m_texture->GetWidth(), m_texture->GetHeight()}};
draw.BeginRenderPass(m_framebuffer, dest_region);
draw.DrawQuad(0, 0, config.width, config.height, scaled_src_rect.left, scaled_src_rect.top, 0,
scaled_src_rect.GetWidth(), scaled_src_rect.GetHeight(),
framebuffer_mgr->GetEFBWidth(), framebuffer_mgr->GetEFBHeight());
draw.EndRenderPass();
// We touched everything, so put it back.
state_tracker->SetPendingRebind();
// Transition the EFB back to its original layout.
src_texture->TransitionToLayout(command_buffer, original_layout);
// Render pass transitions texture to SHADER_READ_ONLY.
m_texture->OverrideImageLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
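// Note on the push-constant sizes above: the depth shader declares "vec4 colmat[5]"
// (5 * 4 = 20 floats) and the color shader "vec4 colmat[7]" (7 * 4 = 28 floats), which
// is where sizeof(float) * 20 and sizeof(float) * 28 come from.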
void TextureCache::TCacheEntry::CopyRectangleFromTexture(const TCacheEntryBase* source,
const MathUtil::Rectangle<int>& src_rect,
const MathUtil::Rectangle<int>& dst_rect)
{
const TCacheEntry* source_vk = static_cast<const TCacheEntry*>(source);
VkCommandBuffer command_buffer = g_command_buffer_mgr->GetCurrentCommandBuffer();
// Fast path when not scaling the image.
if (src_rect.GetWidth() == dst_rect.GetWidth() && src_rect.GetHeight() == dst_rect.GetHeight())
{
// These assertions should hold true unless the base code is passing us sizes too large, in
// which case it should be fixed instead.
_assert_msg_(VIDEO, static_cast<u32>(src_rect.GetWidth()) <= source->config.width &&
static_cast<u32>(src_rect.GetHeight()) <= source->config.height,
"Source rect is too large for CopyRectangleFromTexture");
_assert_msg_(VIDEO, static_cast<u32>(dst_rect.GetWidth()) <= config.width &&
static_cast<u32>(dst_rect.GetHeight()) <= config.height,
"Dest rect is too large for CopyRectangleFromTexture");
VkImageCopy image_copy = {
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 0,
source->config.layers}, // VkImageSubresourceLayers srcSubresource
{src_rect.left, src_rect.top, 0}, // VkOffset3D srcOffset
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 0,
config.layers}, // VkImageSubresourceLayers dstSubresource
{dst_rect.left, dst_rect.top, 0}, // VkOffset3D dstOffset
{static_cast<uint32_t>(src_rect.GetWidth()), static_cast<uint32_t>(src_rect.GetHeight()),
1} // VkExtent3D extent
};
// Must be called outside of a render pass.
m_parent->m_state_tracker->EndRenderPass();
source_vk->m_texture->TransitionToLayout(command_buffer, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
m_texture->TransitionToLayout(command_buffer, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vkCmdCopyImage(command_buffer, source_vk->m_texture->GetImage(),
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_texture->GetImage(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &image_copy);
m_texture->TransitionToLayout(command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
source_vk->m_texture->TransitionToLayout(command_buffer,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
return;
}
// Can't do this within a game render pass.
m_parent->m_state_tracker->EndRenderPass();
m_parent->m_state_tracker->SetPendingRebind();
// Can't render to a non-rendertarget (no framebuffer).
_assert_msg_(VIDEO, config.rendertarget,
"Destination texture for partial copy is not a rendertarget");
UtilityShaderDraw draw(
g_command_buffer_mgr->GetCurrentCommandBuffer(), g_object_cache->GetStandardPipelineLayout(),
m_parent->GetRenderPassForTextureUpdate(m_texture.get()),
g_object_cache->GetPassthroughVertexShader(), VK_NULL_HANDLE, m_parent->m_copy_shader);
VkRect2D region = {
{dst_rect.left, dst_rect.top},
{static_cast<u32>(dst_rect.GetWidth()), static_cast<u32>(dst_rect.GetHeight())}};
draw.BeginRenderPass(m_framebuffer, region);
draw.SetPSSampler(0, source_vk->GetTexture()->GetView(), g_object_cache->GetLinearSampler());
draw.DrawQuad(dst_rect.left, dst_rect.top, dst_rect.GetWidth(), dst_rect.GetHeight(),
src_rect.left, src_rect.top, 0, src_rect.GetWidth(), src_rect.GetHeight(),
source->config.width, source->config.height);
draw.EndRenderPass();
// Render pass transitions texture to SHADER_READ_ONLY.
m_texture->OverrideImageLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
void TextureCache::TCacheEntry::Bind(unsigned int stage)
{
m_parent->m_state_tracker->SetTexture(stage, m_texture->GetView());
}
bool TextureCache::TCacheEntry::Save(const std::string& filename, unsigned int level)
{
_assert_(level < config.levels);
// Determine dimensions of image we want to save.
u32 level_width = std::max(1u, config.width >> level);
u32 level_height = std::max(1u, config.height >> level);
// Use a temporary staging texture for the download. Certainly not optimal,
// but since we have to idle the GPU anyway it doesn't really matter.
std::unique_ptr<StagingTexture2D> staging_texture = StagingTexture2D::Create(
STAGING_BUFFER_TYPE_READBACK, level_width, level_height, TEXTURECACHE_TEXTURE_FORMAT);
// Transition image to transfer source, and invalidate the current state,
// since we'll be executing the command buffer.
m_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
m_parent->m_state_tracker->EndRenderPass();
// Copy to download buffer.
staging_texture->CopyFromImage(g_command_buffer_mgr->GetCurrentCommandBuffer(),
m_texture->GetImage(), VK_IMAGE_ASPECT_COLOR_BIT, 0, 0,
level_width, level_height, level, 0);
// Restore original state of texture.
m_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
// Block until the GPU has finished copying to the staging texture.
g_command_buffer_mgr->ExecuteCommandBuffer(false, true);
m_parent->m_state_tracker->InvalidateDescriptorSets();
m_parent->m_state_tracker->SetPendingRebind();
// Map the staging texture so we can copy the contents out.
if (!staging_texture->Map())
{
PanicAlert("Failed to map staging texture");
return false;
}
// Write texture out to file.
// It's okay to throw this texture away immediately, since we're done with it, and
// we blocked until the copy completed on the GPU anyway.
bool result = TextureToPng(reinterpret_cast<u8*>(staging_texture->GetMapPointer()),
staging_texture->GetRowStride(), filename, level_width, level_height);
staging_texture->Unmap();
return result;
}
bool TextureCache::CompileShaders()
{
static const char COPY_SHADER_SOURCE[] = R"(
layout(set = 1, binding = 0) uniform sampler2DArray samp0;
layout(location = 0) in float3 uv0;
layout(location = 1) in float4 col0;
layout(location = 0) out float4 ocol0;
void main()
{
ocol0 = texture(samp0, uv0);
}
)";
static const char EFB_COLOR_TO_TEX_SOURCE[] = R"(
SAMPLER_BINDING(0) uniform sampler2DArray samp0;
layout(std140, push_constant) uniform PSBlock
{
vec4 colmat[7];
} C;
layout(location = 0) in vec3 uv0;
layout(location = 1) in vec4 col0;
layout(location = 0) out vec4 ocol0;
void main()
{
float4 texcol = texture(samp0, uv0);
texcol = round(texcol * C.colmat[5]) * C.colmat[6];
ocol0 = texcol * mat4(C.colmat[0], C.colmat[1], C.colmat[2], C.colmat[3]) + C.colmat[4];
}
)";
static const char EFB_DEPTH_TO_TEX_SOURCE[] = R"(
SAMPLER_BINDING(0) uniform sampler2DArray samp0;
layout(std140, push_constant) uniform PSBlock
{
vec4 colmat[5];
} C;
layout(location = 0) in vec3 uv0;
layout(location = 1) in vec4 col0;
layout(location = 0) out vec4 ocol0;
void main()
{
#if MONO_DEPTH
vec4 texcol = texture(samp0, vec3(uv0.xy, 0.0f));
#else
vec4 texcol = texture(samp0, uv0);
#endif
int depth = int((1.0 - texcol.x) * 16777216.0);
// Convert to Z24 format
ivec4 workspace;
workspace.r = (depth >> 16) & 255;
workspace.g = (depth >> 8) & 255;
workspace.b = depth & 255;
// Convert to Z4 format
workspace.a = (depth >> 16) & 0xF0;
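// e.g. depth = 0x123456 yields workspace = (0x12, 0x34, 0x56, 0x10).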
// Normalize components to [0.0..1.0]
texcol = vec4(workspace) / 255.0;
ocol0 = texcol * mat4(C.colmat[0], C.colmat[1], C.colmat[2], C.colmat[3]) + C.colmat[4];
}
)";
std::string header = g_object_cache->GetUtilityShaderHeader();
std::string source;
source = header + COPY_SHADER_SOURCE;
m_copy_shader = Util::CompileAndCreateFragmentShader(source);
source = header + EFB_COLOR_TO_TEX_SOURCE;
m_efb_color_to_tex_shader = Util::CompileAndCreateFragmentShader(source);
if (g_ActiveConfig.bStereoEFBMonoDepth)
source = header + "#define MONO_DEPTH 1\n" + EFB_DEPTH_TO_TEX_SOURCE;
else
source = header + EFB_DEPTH_TO_TEX_SOURCE;
m_efb_depth_to_tex_shader = Util::CompileAndCreateFragmentShader(source);
return (m_copy_shader != VK_NULL_HANDLE && m_efb_color_to_tex_shader != VK_NULL_HANDLE &&
m_efb_depth_to_tex_shader != VK_NULL_HANDLE);
}
void TextureCache::DeleteShaders()
{
auto DestroyShader = [this](VkShaderModule& shader) {
if (shader != VK_NULL_HANDLE)
{
vkDestroyShaderModule(g_vulkan_context->GetDevice(), shader, nullptr);
shader = VK_NULL_HANDLE;
}
};
// Since this can be called by the base class we need to wait for idle.
g_command_buffer_mgr->WaitForGPUIdle();
DestroyShader(m_copy_shader);
DestroyShader(m_efb_color_to_tex_shader);
DestroyShader(m_efb_depth_to_tex_shader);
}
} // namespace Vulkan

View File

@ -0,0 +1,87 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include "VideoBackends/Vulkan/StreamBuffer.h"
#include "VideoCommon/TextureCacheBase.h"
namespace Vulkan
{
class PaletteTextureConverter;
class StateTracker;
class Texture2D;
class TextureEncoder;
class TextureCache : public TextureCacheBase
{
public:
TextureCache();
~TextureCache();
bool Initialize(StateTracker* state_tracker);
bool CompileShaders() override;
void DeleteShaders() override;
void ConvertTexture(TCacheEntryBase* base_entry, TCacheEntryBase* base_unconverted, void* palette,
TlutFormat format) override;
void CopyEFB(u8* dst, u32 format, u32 native_width, u32 bytes_per_row, u32 num_blocks_y,
u32 memory_stride, PEControl::PixelFormat src_format, const EFBRectangle& src_rect,
bool is_intensity, bool scale_by_half) override;
private:
struct TCacheEntry : TCacheEntryBase
{
TCacheEntry(const TCacheEntryConfig& config_, TextureCache* parent,
std::unique_ptr<Texture2D> texture, VkFramebuffer framebuffer);
~TCacheEntry();
Texture2D* GetTexture() const { return m_texture.get(); }
VkFramebuffer GetFramebuffer() const { return m_framebuffer; }
void Load(unsigned int width, unsigned int height, unsigned int expanded_width,
unsigned int level) override;
void FromRenderTarget(u8* dst, PEControl::PixelFormat src_format, const EFBRectangle& src_rect,
bool scale_by_half, unsigned int cbufid, const float* colmat) override;
void CopyRectangleFromTexture(const TCacheEntryBase* source,
const MathUtil::Rectangle<int>& src_rect,
const MathUtil::Rectangle<int>& dst_rect) override;
void Bind(unsigned int stage) override;
bool Save(const std::string& filename, unsigned int level) override;
private:
TextureCache* m_parent;
std::unique_ptr<Texture2D> m_texture;
// If we're an EFB copy, framebuffer for drawing into.
VkFramebuffer m_framebuffer;
};
TCacheEntryBase* CreateTexture(const TCacheEntryConfig& config) override;
bool CreateRenderPasses();
VkRenderPass GetRenderPassForTextureUpdate(const Texture2D* texture) const;
StateTracker* m_state_tracker = nullptr;
VkRenderPass m_initialize_render_pass = VK_NULL_HANDLE;
VkRenderPass m_update_render_pass = VK_NULL_HANDLE;
std::unique_ptr<StreamBuffer> m_texture_upload_buffer;
std::unique_ptr<TextureEncoder> m_texture_encoder;
std::unique_ptr<PaletteTextureConverter> m_palette_texture_converter;
VkShaderModule m_copy_shader = VK_NULL_HANDLE;
VkShaderModule m_efb_color_to_tex_shader = VK_NULL_HANDLE;
VkShaderModule m_efb_depth_to_tex_shader = VK_NULL_HANDLE;
};
} // namespace Vulkan

View File

@ -0,0 +1,238 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <algorithm>
#include <cstring>
#include "Common/CommonFuncs.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/FramebufferManager.h"
#include "VideoBackends/Vulkan/ObjectCache.h"
#include "VideoBackends/Vulkan/Renderer.h"
#include "VideoBackends/Vulkan/StagingTexture2D.h"
#include "VideoBackends/Vulkan/StateTracker.h"
#include "VideoBackends/Vulkan/StreamBuffer.h"
#include "VideoBackends/Vulkan/Texture2D.h"
#include "VideoBackends/Vulkan/TextureCache.h"
#include "VideoBackends/Vulkan/TextureEncoder.h"
#include "VideoBackends/Vulkan/Util.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
#include "VideoCommon/TextureConversionShader.h"
#include "VideoCommon/TextureDecoder.h"
namespace Vulkan
{
TextureEncoder::TextureEncoder()
{
}
TextureEncoder::~TextureEncoder()
{
if (m_encoding_render_pass != VK_NULL_HANDLE)
vkDestroyRenderPass(g_vulkan_context->GetDevice(), m_encoding_render_pass, nullptr);
if (m_encoding_texture_framebuffer != VK_NULL_HANDLE)
vkDestroyFramebuffer(g_vulkan_context->GetDevice(), m_encoding_texture_framebuffer, nullptr);
for (VkShaderModule shader : m_texture_encoding_shaders)
{
if (shader != VK_NULL_HANDLE)
vkDestroyShaderModule(g_vulkan_context->GetDevice(), shader, nullptr);
}
}
bool TextureEncoder::Initialize()
{
if (!CompileShaders())
{
PanicAlert("Failed to compile shaders");
return false;
}
if (!CreateEncodingRenderPass())
{
PanicAlert("Failed to create encode render pass");
return false;
}
if (!CreateEncodingTexture())
{
PanicAlert("Failed to create encoding texture");
return false;
}
if (!CreateDownloadTexture())
{
PanicAlert("Failed to create download texture");
return false;
}
return true;
}
void TextureEncoder::EncodeTextureToRam(StateTracker* state_tracker, VkImageView src_texture,
u8* dest_ptr, u32 format, u32 native_width,
u32 bytes_per_row, u32 num_blocks_y, u32 memory_stride,
PEControl::PixelFormat src_format, bool is_intensity,
int scale_by_half, const EFBRectangle& src_rect)
{
if (m_texture_encoding_shaders[format] == VK_NULL_HANDLE)
{
ERROR_LOG(VIDEO, "Missing encoding fragment shader for format %u", format);
return;
}
// Can't do our own draw within a render pass.
state_tracker->EndRenderPass();
UtilityShaderDraw draw(g_command_buffer_mgr->GetCurrentCommandBuffer(),
g_object_cache->GetPushConstantPipelineLayout(), m_encoding_render_pass,
g_object_cache->GetScreenQuadVertexShader(), VK_NULL_HANDLE,
m_texture_encoding_shaders[format]);
// Uniform - int4 of left, top, native_width, scale
s32 position_uniform[4] = {src_rect.left, src_rect.top, static_cast<s32>(native_width),
scale_by_half ? 2 : 1};
draw.SetPushConstants(position_uniform, sizeof(position_uniform));
// Doesn't make sense to linear filter depth values
draw.SetPSSampler(0, src_texture, (scale_by_half && src_format != PEControl::Z24) ?
g_object_cache->GetLinearSampler() :
g_object_cache->GetPointSampler());
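// Each RGBA8 texel of the render target packs four bytes of encoded output, hence the width in u32 units.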
u32 render_width = bytes_per_row / sizeof(u32);
u32 render_height = num_blocks_y;
Util::SetViewportAndScissor(g_command_buffer_mgr->GetCurrentCommandBuffer(), 0, 0, render_width,
render_height);
// TODO: We could use compute shaders here.
VkRect2D render_region = {{0, 0}, {render_width, render_height}};
draw.BeginRenderPass(m_encoding_texture_framebuffer, render_region);
draw.DrawWithoutVertexBuffer(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, 4);
draw.EndRenderPass();
// The render pass's final layout is TRANSFER_SRC_OPTIMAL, so only the tracked layout needs updating before the copy.
m_encoding_texture->OverrideImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
m_download_texture->CopyFromImage(g_command_buffer_mgr->GetCurrentCommandBuffer(),
m_encoding_texture->GetImage(), VK_IMAGE_ASPECT_COLOR_BIT, 0, 0,
render_width, render_height, 0, 0);
// Block until the GPU has finished copying to the staging texture.
g_command_buffer_mgr->ExecuteCommandBuffer(false, true);
state_tracker->InvalidateDescriptorSets();
state_tracker->SetPendingRebind();
// Copy from staging texture to the final destination, adjusting pitch if necessary.
m_download_texture->ReadTexels(0, 0, render_width, render_height, dest_ptr, memory_stride);
}
bool TextureEncoder::CompileShaders()
{
// Texture encoding shaders
static const u32 texture_encoding_shader_formats[] = {
GX_TF_I4, GX_TF_I8, GX_TF_IA4, GX_TF_IA8, GX_TF_RGB565, GX_TF_RGB5A3, GX_TF_RGBA8,
GX_CTF_R4, GX_CTF_RA4, GX_CTF_RA8, GX_CTF_A8, GX_CTF_R8, GX_CTF_G8, GX_CTF_B8,
GX_CTF_RG8, GX_CTF_GB8, GX_CTF_Z8H, GX_TF_Z8, GX_CTF_Z16R, GX_TF_Z16, GX_TF_Z24X8,
GX_CTF_Z4, GX_CTF_Z8M, GX_CTF_Z8L, GX_CTF_Z16L};
for (u32 format : texture_encoding_shader_formats)
{
const char* shader_source =
TextureConversionShader::GenerateEncodingShader(format, APIType::Vulkan);
m_texture_encoding_shaders[format] = Util::CompileAndCreateFragmentShader(shader_source);
if (m_texture_encoding_shaders[format] == VK_NULL_HANDLE)
return false;
}
return true;
}
bool TextureEncoder::CreateEncodingRenderPass()
{
VkAttachmentDescription attachments[] = {
{0, ENCODING_TEXTURE_FORMAT, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL}};
VkAttachmentReference color_attachment_references[] = {
{0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}};
VkSubpassDescription subpass_descriptions[] = {{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1,
color_attachment_references, nullptr, nullptr, 0,
nullptr}};
VkSubpassDependency dependencies[] = {
{0, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_ACCESS_TRANSFER_READ_BIT, VK_DEPENDENCY_BY_REGION_BIT}};
VkRenderPassCreateInfo pass_info = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
nullptr,
0,
static_cast<u32>(ArraySize(attachments)),
attachments,
static_cast<u32>(ArraySize(subpass_descriptions)),
subpass_descriptions,
static_cast<u32>(ArraySize(dependencies)),
dependencies};
VkResult res = vkCreateRenderPass(g_vulkan_context->GetDevice(), &pass_info, nullptr,
&m_encoding_render_pass);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateRenderPass (Encode) failed: ");
return false;
}
return true;
}
bool TextureEncoder::CreateEncodingTexture()
{
// Carried over from the OGL backend: why do we create a texture with a height of 1024?
m_encoding_texture = Texture2D::Create(
ENCODING_TEXTURE_WIDTH, ENCODING_TEXTURE_HEIGHT, 1, 1, ENCODING_TEXTURE_FORMAT,
VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
if (!m_encoding_texture)
return false;
VkImageView framebuffer_attachments[] = {m_encoding_texture->GetView()};
VkFramebufferCreateInfo framebuffer_info = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
nullptr,
0,
m_encoding_render_pass,
static_cast<u32>(ArraySize(framebuffer_attachments)),
framebuffer_attachments,
m_encoding_texture->GetWidth(),
m_encoding_texture->GetHeight(),
m_encoding_texture->GetLayers()};
VkResult res = vkCreateFramebuffer(g_vulkan_context->GetDevice(), &framebuffer_info, nullptr,
&m_encoding_texture_framebuffer);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateFramebuffer failed: ");
return false;
}
return true;
}
bool TextureEncoder::CreateDownloadTexture()
{
m_download_texture =
StagingTexture2D::Create(STAGING_BUFFER_TYPE_READBACK, ENCODING_TEXTURE_WIDTH,
ENCODING_TEXTURE_HEIGHT, ENCODING_TEXTURE_FORMAT);
if (!m_download_texture || !m_download_texture->Map())
return false;
return true;
}
} // namespace Vulkan

View File

@ -0,0 +1,57 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include "VideoBackends/Vulkan/StreamBuffer.h"
#include "VideoCommon/VideoCommon.h"
namespace Vulkan
{
class StagingTexture2D;
class StateTracker;
class Texture2D;
class TextureEncoder
{
public:
TextureEncoder();
~TextureEncoder();
bool Initialize();
// Uses an encoding shader to copy src_texture to dest_ptr.
// Assumes that no render pass is currently in progress.
// WARNING: Executes the current command buffer.
void EncodeTextureToRam(StateTracker* state_tracker, VkImageView src_texture, u8* dest_ptr,
u32 format, u32 native_width, u32 bytes_per_row, u32 num_blocks_y,
u32 memory_stride, PEControl::PixelFormat src_format, bool is_intensity,
int scale_by_half, const EFBRectangle& src_rect);
private:
// From OGL.
static const u32 NUM_TEXTURE_ENCODING_SHADERS = 64;
static const u32 ENCODING_TEXTURE_WIDTH = EFB_WIDTH * 4;
static const u32 ENCODING_TEXTURE_HEIGHT = 1024;
static const VkFormat ENCODING_TEXTURE_FORMAT = VK_FORMAT_B8G8R8A8_UNORM;
bool CompileShaders();
bool CreateEncodingRenderPass();
bool CreateEncodingTexture();
bool CreateDownloadTexture();
std::array<VkShaderModule, NUM_TEXTURE_ENCODING_SHADERS> m_texture_encoding_shaders = {};
VkRenderPass m_encoding_render_pass = VK_NULL_HANDLE;
std::unique_ptr<Texture2D> m_encoding_texture;
VkFramebuffer m_encoding_texture_framebuffer = VK_NULL_HANDLE;
std::unique_ptr<StagingTexture2D> m_download_texture;
};
} // namespace Vulkan

View File

@ -0,0 +1,755 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include "Common/Assert.h"
#include "Common/CommonFuncs.h"
#include "Common/MathUtil.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/ObjectCache.h"
#include "VideoBackends/Vulkan/Renderer.h"
#include "VideoBackends/Vulkan/ShaderCompiler.h"
#include "VideoBackends/Vulkan/StateTracker.h"
#include "VideoBackends/Vulkan/StreamBuffer.h"
#include "VideoBackends/Vulkan/Util.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
namespace Vulkan
{
namespace Util
{
size_t AlignValue(size_t value, size_t alignment)
{
// Have to use mod rather than masking bits in case alignment is not a power of two.
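// e.g. AlignValue(13, 8) == 16, AlignValue(16, 8) == 16.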
size_t offset = value % alignment;
if (offset != 0)
value += (alignment - offset);
return value;
}
size_t AlignBufferOffset(size_t offset, size_t alignment)
{
// Assume an offset of zero is already aligned to a value larger than alignment.
if (offset == 0)
return 0;
return AlignValue(offset, alignment);
}
u32 MakeRGBA8Color(float r, float g, float b, float a)
{
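// Packs the components into a little-endian RGBA8 value, e.g. (1, 0, 0, 1) -> 0xFF0000FF.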
return (static_cast<u32>(MathUtil::Clamp(static_cast<int>(r * 255.0f), 0, 255)) << 0) |
(static_cast<u32>(MathUtil::Clamp(static_cast<int>(g * 255.0f), 0, 255)) << 8) |
(static_cast<u32>(MathUtil::Clamp(static_cast<int>(b * 255.0f), 0, 255)) << 16) |
(static_cast<u32>(MathUtil::Clamp(static_cast<int>(a * 255.0f), 0, 255)) << 24);
}
bool IsDepthFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_D16_UNORM_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
return true;
default:
return false;
}
}
VkFormat GetLinearFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_R8_SRGB:
return VK_FORMAT_R8_UNORM;
case VK_FORMAT_R8G8_SRGB:
return VK_FORMAT_R8G8_UNORM;
case VK_FORMAT_R8G8B8_SRGB:
return VK_FORMAT_R8G8B8_UNORM;
case VK_FORMAT_R8G8B8A8_SRGB:
return VK_FORMAT_R8G8B8A8_UNORM;
case VK_FORMAT_B8G8R8_SRGB:
return VK_FORMAT_B8G8R8_UNORM;
case VK_FORMAT_B8G8R8A8_SRGB:
return VK_FORMAT_B8G8R8A8_UNORM;
default:
return format;
}
}
u32 GetTexelSize(VkFormat format)
{
// Only contains pixel formats we use.
switch (format)
{
case VK_FORMAT_R32_SFLOAT:
return 4;
case VK_FORMAT_D32_SFLOAT:
return 4;
case VK_FORMAT_R8G8B8A8_UNORM:
return 4;
case VK_FORMAT_B8G8R8A8_UNORM:
return 4;
default:
PanicAlert("Unhandled pixel format");
return 1;
}
}
VkBlendFactor GetAlphaBlendFactor(VkBlendFactor factor)
{
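// The alpha channel carries no color, so when blending alpha the color factors are replaced with their alpha equivalents.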
switch (factor)
{
case VK_BLEND_FACTOR_SRC_COLOR:
return VK_BLEND_FACTOR_SRC_ALPHA;
case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
case VK_BLEND_FACTOR_DST_COLOR:
return VK_BLEND_FACTOR_DST_ALPHA;
case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
default:
return factor;
}
}
RasterizationState GetNoCullRasterizationState()
{
RasterizationState state = {};
state.cull_mode = VK_CULL_MODE_NONE;
state.samples = VK_SAMPLE_COUNT_1_BIT;
state.per_sample_shading = VK_FALSE;
state.depth_clamp = VK_FALSE;
return state;
}
DepthStencilState GetNoDepthTestingDepthStencilState()
{
DepthStencilState state = {};
state.test_enable = VK_FALSE;
state.write_enable = VK_FALSE;
state.compare_op = VK_COMPARE_OP_ALWAYS;
return state;
}
BlendState GetNoBlendingBlendState()
{
BlendState state = {};
state.blend_enable = VK_FALSE;
state.blend_op = VK_BLEND_OP_ADD;
state.src_blend = VK_BLEND_FACTOR_ONE;
state.dst_blend = VK_BLEND_FACTOR_ZERO;
state.alpha_blend_op = VK_BLEND_OP_ADD;
state.src_alpha_blend = VK_BLEND_FACTOR_ONE;
state.dst_alpha_blend = VK_BLEND_FACTOR_ZERO;
state.logic_op_enable = VK_FALSE;
state.logic_op = VK_LOGIC_OP_CLEAR;
state.write_mask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
return state;
}
void SetViewportAndScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height,
float min_depth /*= 0.0f*/, float max_depth /*= 1.0f*/)
{
VkViewport viewport = {static_cast<float>(x),
static_cast<float>(y),
static_cast<float>(width),
static_cast<float>(height),
min_depth,
max_depth};
VkRect2D scissor = {{x, y}, {static_cast<uint32_t>(width), static_cast<uint32_t>(height)}};
vkCmdSetViewport(command_buffer, 0, 1, &viewport);
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
}
void BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer,
VkAccessFlags src_access_mask, VkAccessFlags dst_access_mask,
VkDeviceSize offset, VkDeviceSize size,
VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask)
{
VkBufferMemoryBarrier buffer_info = {
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
src_access_mask, // VkAccessFlags srcAccessMask
dst_access_mask, // VkAccessFlags dstAccessMask
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
buffer, // VkBuffer buffer
offset, // VkDeviceSize offset
size // VkDeviceSize size
};
vkCmdPipelineBarrier(command_buffer, src_stage_mask, dst_stage_mask, 0, 0, nullptr, 1,
&buffer_info, 0, nullptr);
}
void ExecuteCurrentCommandsAndRestoreState(StateTracker* state_tracker, bool execute_off_thread,
bool wait_for_completion)
{
state_tracker->EndRenderPass();
g_command_buffer_mgr->ExecuteCommandBuffer(execute_off_thread, wait_for_completion);
state_tracker->InvalidateDescriptorSets();
state_tracker->SetPendingRebind();
}
VkShaderModule CreateShaderModule(const u32* spv, size_t spv_word_count)
{
VkShaderModuleCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
info.codeSize = spv_word_count * sizeof(u32);
info.pCode = spv;
VkShaderModule module;
VkResult res = vkCreateShaderModule(g_vulkan_context->GetDevice(), &info, nullptr, &module);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateShaderModule failed: ");
return VK_NULL_HANDLE;
}
return module;
}
VkShaderModule CompileAndCreateVertexShader(const std::string& source_code, bool prepend_header)
{
ShaderCompiler::SPIRVCodeVector code;
if (!ShaderCompiler::CompileVertexShader(&code, source_code.c_str(), source_code.length(),
prepend_header))
{
return VK_NULL_HANDLE;
}
return CreateShaderModule(code.data(), code.size());
}
VkShaderModule CompileAndCreateGeometryShader(const std::string& source_code, bool prepend_header)
{
ShaderCompiler::SPIRVCodeVector code;
if (!ShaderCompiler::CompileGeometryShader(&code, source_code.c_str(), source_code.length(),
prepend_header))
{
return VK_NULL_HANDLE;
}
return CreateShaderModule(code.data(), code.size());
}
VkShaderModule CompileAndCreateFragmentShader(const std::string& source_code, bool prepend_header)
{
ShaderCompiler::SPIRVCodeVector code;
if (!ShaderCompiler::CompileFragmentShader(&code, source_code.c_str(), source_code.length(),
prepend_header))
{
return VK_NULL_HANDLE;
}
return CreateShaderModule(code.data(), code.size());
}
} // namespace Util
template <>
DeferredResourceDestruction
DeferredResourceDestruction::Wrapper<VkCommandPool>(VkCommandPool object)
{
DeferredResourceDestruction ret;
ret.object.command_pool = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyCommandPool(device, obj.command_pool, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction
DeferredResourceDestruction::Wrapper<VkDeviceMemory>(VkDeviceMemory object)
{
DeferredResourceDestruction ret;
ret.object.device_memory = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkFreeMemory(device, obj.device_memory, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction DeferredResourceDestruction::Wrapper<VkBuffer>(VkBuffer object)
{
DeferredResourceDestruction ret;
ret.object.buffer = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyBuffer(device, obj.buffer, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction DeferredResourceDestruction::Wrapper<VkBufferView>(VkBufferView object)
{
DeferredResourceDestruction ret;
ret.object.buffer_view = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyBufferView(device, obj.buffer_view, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction DeferredResourceDestruction::Wrapper<VkImage>(VkImage object)
{
DeferredResourceDestruction ret;
ret.object.image = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyImage(device, obj.image, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction DeferredResourceDestruction::Wrapper<VkImageView>(VkImageView object)
{
DeferredResourceDestruction ret;
ret.object.image_view = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyImageView(device, obj.image_view, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction DeferredResourceDestruction::Wrapper<VkRenderPass>(VkRenderPass object)
{
DeferredResourceDestruction ret;
ret.object.render_pass = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyRenderPass(device, obj.render_pass, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction
DeferredResourceDestruction::Wrapper<VkFramebuffer>(VkFramebuffer object)
{
DeferredResourceDestruction ret;
ret.object.framebuffer = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyFramebuffer(device, obj.framebuffer, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction
DeferredResourceDestruction::Wrapper<VkShaderModule>(VkShaderModule object)
{
DeferredResourceDestruction ret;
ret.object.shader_module = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyShaderModule(device, obj.shader_module, nullptr);
};
return ret;
}
template <>
DeferredResourceDestruction DeferredResourceDestruction::Wrapper<VkPipeline>(VkPipeline object)
{
DeferredResourceDestruction ret;
ret.object.pipeline = object;
ret.destroy_callback = [](VkDevice device, const Object& obj) {
vkDestroyPipeline(device, obj.pipeline, nullptr);
};
return ret;
}
UtilityShaderDraw::UtilityShaderDraw(VkCommandBuffer command_buffer,
VkPipelineLayout pipeline_layout, VkRenderPass render_pass,
VkShaderModule vertex_shader, VkShaderModule geometry_shader,
VkShaderModule pixel_shader)
: m_command_buffer(command_buffer)
{
// Populate minimal pipeline state
m_pipeline_info.vertex_format = g_object_cache->GetUtilityShaderVertexFormat();
m_pipeline_info.pipeline_layout = pipeline_layout;
m_pipeline_info.render_pass = render_pass;
m_pipeline_info.vs = vertex_shader;
m_pipeline_info.gs = geometry_shader;
m_pipeline_info.ps = pixel_shader;
m_pipeline_info.rasterization_state.bits = Util::GetNoCullRasterizationState().bits;
m_pipeline_info.depth_stencil_state.bits = Util::GetNoDepthTestingDepthStencilState().bits;
m_pipeline_info.blend_state.bits = Util::GetNoBlendingBlendState().bits;
m_pipeline_info.primitive_topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
}
UtilityShaderVertex* UtilityShaderDraw::ReserveVertices(VkPrimitiveTopology topology, size_t count)
{
m_pipeline_info.primitive_topology = topology;
if (!g_object_cache->GetUtilityShaderVertexBuffer()->ReserveMemory(
sizeof(UtilityShaderVertex) * count, sizeof(UtilityShaderVertex), true, true, true))
PanicAlert("Failed to allocate space for vertices in backend shader");
m_vertex_buffer = g_object_cache->GetUtilityShaderVertexBuffer()->GetBuffer();
m_vertex_buffer_offset = g_object_cache->GetUtilityShaderVertexBuffer()->GetCurrentOffset();
return reinterpret_cast<UtilityShaderVertex*>(
g_object_cache->GetUtilityShaderVertexBuffer()->GetCurrentHostPointer());
}
void UtilityShaderDraw::CommitVertices(size_t count)
{
g_object_cache->GetUtilityShaderVertexBuffer()->CommitMemory(sizeof(UtilityShaderVertex) * count);
m_vertex_count = static_cast<uint32_t>(count);
}
void UtilityShaderDraw::UploadVertices(VkPrimitiveTopology topology, UtilityShaderVertex* vertices,
size_t count)
{
UtilityShaderVertex* upload_vertices = ReserveVertices(topology, count);
memcpy(upload_vertices, vertices, sizeof(UtilityShaderVertex) * count);
CommitVertices(count);
}
u8* UtilityShaderDraw::AllocateVSUniforms(size_t size)
{
if (!g_object_cache->GetUtilityShaderUniformBuffer()->ReserveMemory(
size, g_vulkan_context->GetUniformBufferAlignment(), true, true, true))
PanicAlert("Failed to allocate util uniforms");
return g_object_cache->GetUtilityShaderUniformBuffer()->GetCurrentHostPointer();
}
void UtilityShaderDraw::CommitVSUniforms(size_t size)
{
m_vs_uniform_buffer.buffer = g_object_cache->GetUtilityShaderUniformBuffer()->GetBuffer();
m_vs_uniform_buffer.offset = 0;
m_vs_uniform_buffer.range = size;
m_ubo_offsets[UBO_DESCRIPTOR_SET_BINDING_VS] =
static_cast<uint32_t>(g_object_cache->GetUtilityShaderUniformBuffer()->GetCurrentOffset());
g_object_cache->GetUtilityShaderUniformBuffer()->CommitMemory(size);
}
u8* UtilityShaderDraw::AllocatePSUniforms(size_t size)
{
if (!g_object_cache->GetUtilityShaderUniformBuffer()->ReserveMemory(
size, g_vulkan_context->GetUniformBufferAlignment(), true, true, true))
PanicAlert("Failed to allocate util uniforms");
return g_object_cache->GetUtilityShaderUniformBuffer()->GetCurrentHostPointer();
}
void UtilityShaderDraw::CommitPSUniforms(size_t size)
{
m_ps_uniform_buffer.buffer = g_object_cache->GetUtilityShaderUniformBuffer()->GetBuffer();
m_ps_uniform_buffer.offset = 0;
m_ps_uniform_buffer.range = size;
m_ubo_offsets[UBO_DESCRIPTOR_SET_BINDING_PS] =
static_cast<uint32_t>(g_object_cache->GetUtilityShaderUniformBuffer()->GetCurrentOffset());
g_object_cache->GetUtilityShaderUniformBuffer()->CommitMemory(size);
}
void UtilityShaderDraw::SetPushConstants(const void* data, size_t data_size)
{
_assert_(static_cast<u32>(data_size) < PUSH_CONSTANT_BUFFER_SIZE);
vkCmdPushConstants(m_command_buffer, m_pipeline_info.pipeline_layout,
VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0,
static_cast<u32>(data_size), data);
}
void UtilityShaderDraw::SetPSSampler(size_t index, VkImageView view, VkSampler sampler)
{
m_ps_samplers[index].sampler = sampler;
m_ps_samplers[index].imageView = view;
m_ps_samplers[index].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
void UtilityShaderDraw::SetRasterizationState(const RasterizationState& state)
{
m_pipeline_info.rasterization_state.bits = state.bits;
}
void UtilityShaderDraw::SetDepthStencilState(const DepthStencilState& state)
{
m_pipeline_info.depth_stencil_state.bits = state.bits;
}
void UtilityShaderDraw::SetBlendState(const BlendState& state)
{
m_pipeline_info.blend_state.bits = state.bits;
}
void UtilityShaderDraw::BeginRenderPass(VkFramebuffer framebuffer, const VkRect2D& region,
const VkClearValue* clear_value)
{
VkRenderPassBeginInfo begin_info = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
nullptr,
m_pipeline_info.render_pass,
framebuffer,
region,
clear_value ? 1u : 0u,
clear_value};
vkCmdBeginRenderPass(m_command_buffer, &begin_info, VK_SUBPASS_CONTENTS_INLINE);
}
void UtilityShaderDraw::EndRenderPass()
{
vkCmdEndRenderPass(m_command_buffer);
}
void UtilityShaderDraw::Draw()
{
BindVertexBuffer();
BindDescriptors();
if (!BindPipeline())
return;
vkCmdDraw(m_command_buffer, m_vertex_count, 1, 0, 0);
}
void UtilityShaderDraw::DrawQuad(int x, int y, int width, int height, float z)
{
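// Draw a quad covering the entire normalized device coordinate range; the viewport set below maps it to the requested rectangle.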
UtilityShaderVertex vertices[4];
vertices[0].SetPosition(-1.0f, 1.0f, z);
vertices[0].SetTextureCoordinates(0.0f, 1.0f);
vertices[0].SetColor(1.0f, 1.0f, 1.0f, 1.0f);
vertices[1].SetPosition(1.0f, 1.0f, z);
vertices[1].SetTextureCoordinates(1.0f, 1.0f);
vertices[1].SetColor(1.0f, 1.0f, 1.0f, 1.0f);
vertices[2].SetPosition(-1.0f, -1.0f, z);
vertices[2].SetTextureCoordinates(0.0f, 0.0f);
vertices[2].SetColor(1.0f, 1.0f, 1.0f, 1.0f);
vertices[3].SetPosition(1.0f, -1.0f, z);
vertices[3].SetTextureCoordinates(1.0f, 0.0f);
vertices[3].SetColor(1.0f, 1.0f, 1.0f, 1.0f);
Util::SetViewportAndScissor(m_command_buffer, x, y, width, height);
UploadVertices(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, vertices, ArraySize(vertices));
Draw();
}
void UtilityShaderDraw::DrawQuad(int dst_x, int dst_y, int dst_width, int dst_height, int src_x,
int src_y, int src_layer, int src_width, int src_height,
int src_full_width, int src_full_height, float z)
{
float u0 = float(src_x) / float(src_full_width);
float v0 = float(src_y) / float(src_full_height);
float u1 = float(src_x + src_width) / float(src_full_width);
float v1 = float(src_y + src_height) / float(src_full_height);
float w = static_cast<float>(src_layer);
UtilityShaderVertex vertices[4];
vertices[0].SetPosition(-1.0f, 1.0f, z);
vertices[0].SetTextureCoordinates(u0, v1, w);
vertices[0].SetColor(1.0f, 1.0f, 1.0f, 1.0f);
vertices[1].SetPosition(1.0f, 1.0f, z);
vertices[1].SetTextureCoordinates(u1, v1, w);
vertices[1].SetColor(1.0f, 1.0f, 1.0f, 1.0f);
vertices[2].SetPosition(-1.0f, -1.0f, z);
vertices[2].SetTextureCoordinates(u0, v0, w);
vertices[2].SetColor(1.0f, 1.0f, 1.0f, 1.0f);
vertices[3].SetPosition(1.0f, -1.0f, z);
vertices[3].SetTextureCoordinates(u1, v0, w);
vertices[3].SetColor(1.0f, 1.0f, 1.0f, 1.0f);
Util::SetViewportAndScissor(m_command_buffer, dst_x, dst_y, dst_width, dst_height);
UploadVertices(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, vertices, ArraySize(vertices));
Draw();
}
void UtilityShaderDraw::DrawColoredQuad(int x, int y, int width, int height, float r, float g,
float b, float a, float z)
{
return DrawColoredQuad(x, y, width, height, Util::MakeRGBA8Color(r, g, b, a), z);
}
void UtilityShaderDraw::DrawColoredQuad(int x, int y, int width, int height, u32 color, float z)
{
UtilityShaderVertex vertices[4];
vertices[0].SetPosition(-1.0f, 1.0f, z);
vertices[0].SetTextureCoordinates(0.0f, 1.0f);
vertices[0].SetColor(color);
vertices[1].SetPosition(1.0f, 1.0f, z);
vertices[1].SetTextureCoordinates(1.0f, 1.0f);
vertices[1].SetColor(color);
vertices[2].SetPosition(-1.0f, -1.0f, z);
vertices[2].SetTextureCoordinates(0.0f, 0.0f);
vertices[2].SetColor(color);
vertices[3].SetPosition(1.0f, -1.0f, z);
vertices[3].SetTextureCoordinates(1.0f, 0.0f);
vertices[3].SetColor(color);
Util::SetViewportAndScissor(m_command_buffer, x, y, width, height);
UploadVertices(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, vertices, ArraySize(vertices));
Draw();
}
void UtilityShaderDraw::SetViewportAndScissor(int x, int y, int width, int height)
{
Util::SetViewportAndScissor(m_command_buffer, x, y, width, height, 0.0f, 1.0f);
}
void UtilityShaderDraw::DrawWithoutVertexBuffer(VkPrimitiveTopology primitive_topology,
u32 vertex_count)
{
m_pipeline_info.vertex_format = nullptr;
m_pipeline_info.primitive_topology = primitive_topology;
BindDescriptors();
if (!BindPipeline())
return;
vkCmdDraw(m_command_buffer, vertex_count, 1, 0, 0);
}
void UtilityShaderDraw::BindVertexBuffer()
{
vkCmdBindVertexBuffers(m_command_buffer, 0, 1, &m_vertex_buffer, &m_vertex_buffer_offset);
}
void UtilityShaderDraw::BindDescriptors()
{
// TODO: This method is a mess, clean it up
std::array<VkDescriptorSet, NUM_DESCRIPTOR_SETS> bind_descriptor_sets = {};
std::array<VkWriteDescriptorSet, NUM_UBO_DESCRIPTOR_SET_BINDINGS + NUM_PIXEL_SHADER_SAMPLERS>
set_writes = {};
uint32_t num_set_writes = 0;
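// Vulkan requires a valid buffer in every descriptor write, so unused UBO bindings point at a one-byte dummy range instead.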
VkDescriptorBufferInfo dummy_uniform_buffer = {
g_object_cache->GetUtilityShaderUniformBuffer()->GetBuffer(), 0, 1};
// uniform buffers
if (m_vs_uniform_buffer.buffer != VK_NULL_HANDLE || m_ps_uniform_buffer.buffer != VK_NULL_HANDLE)
{
VkDescriptorSet set = g_command_buffer_mgr->AllocateDescriptorSet(
g_object_cache->GetDescriptorSetLayout(DESCRIPTOR_SET_UNIFORM_BUFFERS));
if (set == VK_NULL_HANDLE)
PanicAlert("Failed to allocate descriptor set for utility draw");
set_writes[num_set_writes++] = {
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, nullptr, set, UBO_DESCRIPTOR_SET_BINDING_VS, 0, 1,
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, nullptr,
(m_vs_uniform_buffer.buffer != VK_NULL_HANDLE) ? &m_vs_uniform_buffer :
&dummy_uniform_buffer,
nullptr};
set_writes[num_set_writes++] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
nullptr,
set,
UBO_DESCRIPTOR_SET_BINDING_GS,
0,
1,
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
nullptr,
&dummy_uniform_buffer,
nullptr};
set_writes[num_set_writes++] = {
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, nullptr, set, UBO_DESCRIPTOR_SET_BINDING_PS, 0, 1,
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, nullptr,
(m_ps_uniform_buffer.buffer != VK_NULL_HANDLE) ? &m_ps_uniform_buffer :
&dummy_uniform_buffer,
nullptr};
bind_descriptor_sets[DESCRIPTOR_SET_UNIFORM_BUFFERS] = set;
}
// PS samplers
size_t first_active_sampler;
for (first_active_sampler = 0; first_active_sampler < NUM_PIXEL_SHADER_SAMPLERS;
first_active_sampler++)
{
if (m_ps_samplers[first_active_sampler].imageView != VK_NULL_HANDLE &&
m_ps_samplers[first_active_sampler].sampler != VK_NULL_HANDLE)
{
break;
}
}
// Check if we have any at all, skip the binding process entirely if we don't
if (first_active_sampler != NUM_PIXEL_SHADER_SAMPLERS)
{
// Allocate a new descriptor set
VkDescriptorSet set = g_command_buffer_mgr->AllocateDescriptorSet(
g_object_cache->GetDescriptorSetLayout(DESCRIPTOR_SET_PIXEL_SHADER_SAMPLERS));
if (set == VK_NULL_HANDLE)
PanicAlert("Failed to allocate descriptor set for utility draw");
for (size_t i = 0; i < NUM_PIXEL_SHADER_SAMPLERS; i++)
{
const VkDescriptorImageInfo& info = m_ps_samplers[i];
if (info.imageView != VK_NULL_HANDLE && info.sampler != VK_NULL_HANDLE)
{
set_writes[num_set_writes++] = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
nullptr,
set,
static_cast<uint32_t>(i),
0,
1,
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
&info,
nullptr,
nullptr};
}
}
bind_descriptor_sets[DESCRIPTOR_SET_PIXEL_SHADER_SAMPLERS] = set;
}
vkUpdateDescriptorSets(g_vulkan_context->GetDevice(), num_set_writes, set_writes.data(), 0,
nullptr);
// Bind only the sets we updated
if (bind_descriptor_sets[0] != VK_NULL_HANDLE && bind_descriptor_sets[1] == VK_NULL_HANDLE)
{
// UBO only
vkCmdBindDescriptorSets(m_command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
m_pipeline_info.pipeline_layout, DESCRIPTOR_SET_UNIFORM_BUFFERS, 1,
&bind_descriptor_sets[0], NUM_UBO_DESCRIPTOR_SET_BINDINGS,
m_ubo_offsets.data());
}
else if (bind_descriptor_sets[0] == VK_NULL_HANDLE && bind_descriptor_sets[1] != VK_NULL_HANDLE)
{
// Samplers only
vkCmdBindDescriptorSets(m_command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
m_pipeline_info.pipeline_layout, DESCRIPTOR_SET_PIXEL_SHADER_SAMPLERS,
1, &bind_descriptor_sets[1], 0, nullptr);
}
else if (bind_descriptor_sets[0] != VK_NULL_HANDLE && bind_descriptor_sets[1] != VK_NULL_HANDLE)
{
// Both
vkCmdBindDescriptorSets(m_command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
m_pipeline_info.pipeline_layout, DESCRIPTOR_SET_UNIFORM_BUFFERS, 2,
bind_descriptor_sets.data(), NUM_UBO_DESCRIPTOR_SET_BINDINGS,
m_ubo_offsets.data());
}
}
bool UtilityShaderDraw::BindPipeline()
{
VkPipeline pipeline = g_object_cache->GetPipeline(m_pipeline_info);
if (pipeline == VK_NULL_HANDLE)
{
PanicAlert("Failed to get pipeline for backend shader draw");
return false;
}
vkCmdBindPipeline(m_command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
return true;
}
} // namespace Vulkan

View File

@ -0,0 +1,206 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include "VideoBackends/Vulkan/Constants.h"
#include "VideoBackends/Vulkan/ObjectCache.h"
namespace Vulkan
{
class CommandBufferManager;
class ObjectCache;
class StateTracker;
namespace Util
{
size_t AlignValue(size_t value, size_t alignment);
size_t AlignBufferOffset(size_t offset, size_t alignment);
u32 MakeRGBA8Color(float r, float g, float b, float a);
bool IsDepthFormat(VkFormat format);
VkFormat GetLinearFormat(VkFormat format);
u32 GetTexelSize(VkFormat format);
// Map {SRC,DST}_COLOR to {SRC,DST}_ALPHA
VkBlendFactor GetAlphaBlendFactor(VkBlendFactor factor);
RasterizationState GetNoCullRasterizationState();
DepthStencilState GetNoDepthTestingDepthStencilState();
BlendState GetNoBlendingBlendState();
// Combines viewport and scissor updates
void SetViewportAndScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height,
float min_depth = 0.0f, float max_depth = 1.0f);
// Wrapper for creating a barrier on a buffer
void BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer,
VkAccessFlags src_access_mask, VkAccessFlags dst_access_mask,
VkDeviceSize offset, VkDeviceSize size,
VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask);
// Completes the current render pass, executes the command buffer, and restores state ready for the
// next render. Use this when you need to kick the current command buffer to make room for new data.
void ExecuteCurrentCommandsAndRestoreState(StateTracker* state_tracker, bool execute_off_thread,
bool wait_for_completion = false);
// Create a shader module from the specified SPIR-V.
VkShaderModule CreateShaderModule(const u32* spv, size_t spv_word_count);
// Compile a vertex shader and create a shader module, discarding the intermediate SPIR-V.
VkShaderModule CompileAndCreateVertexShader(const std::string& source_code,
bool prepend_header = true);
// Compile a geometry shader and create a shader module, discarding the intermediate SPIR-V.
VkShaderModule CompileAndCreateGeometryShader(const std::string& source_code,
bool prepend_header = true);
// Compile a fragment shader and create a shader module, discarding the intermediate SPIR-V.
VkShaderModule CompileAndCreateFragmentShader(const std::string& source_code,
bool prepend_header = true);
}
// Helper methods for cleaning up device objects, used by deferred destruction
struct DeferredResourceDestruction
{
union Object {
VkCommandPool command_pool;
VkDeviceMemory device_memory;
VkBuffer buffer;
VkBufferView buffer_view;
VkImage image;
VkImageView image_view;
VkRenderPass render_pass;
VkFramebuffer framebuffer;
VkShaderModule shader_module;
VkPipeline pipeline;
} object;
void (*destroy_callback)(VkDevice device, const Object& object);
template <typename T>
static DeferredResourceDestruction Wrapper(T object);
};
// Utility shader vertex format
#pragma pack(push, 1)
struct UtilityShaderVertex
{
float Position[4];
float TexCoord[4];
u32 Color;
void SetPosition(float x, float y)
{
Position[0] = x;
Position[1] = y;
Position[2] = 0.0f;
Position[3] = 1.0f;
}
void SetPosition(float x, float y, float z)
{
Position[0] = x;
Position[1] = y;
Position[2] = z;
Position[3] = 1.0f;
}
void SetTextureCoordinates(float u, float v)
{
TexCoord[0] = u;
TexCoord[1] = v;
TexCoord[2] = 0.0f;
TexCoord[3] = 0.0f;
}
void SetTextureCoordinates(float u, float v, float w)
{
TexCoord[0] = u;
TexCoord[1] = v;
TexCoord[2] = w;
TexCoord[3] = 0.0f;
}
void SetTextureCoordinates(float u, float v, float w, float x)
{
TexCoord[0] = u;
TexCoord[1] = v;
TexCoord[2] = w;
TexCoord[3] = x;
}
void SetColor(u32 color) { Color = color; }
void SetColor(float r, float g, float b) { Color = Util::MakeRGBA8Color(r, g, b, 1.0f); }
void SetColor(float r, float g, float b, float a) { Color = Util::MakeRGBA8Color(r, g, b, a); }
};
#pragma pack(pop)
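// Helper for issuing small one-off draws with the utility vertex format.
// Typical usage, mirroring TextureCache::CopyRectangleFromTexture:
//   UtilityShaderDraw draw(command_buffer, pipeline_layout, render_pass, vertex_shader,
//                          VK_NULL_HANDLE, pixel_shader);
//   draw.SetPSSampler(0, view, sampler);
//   draw.BeginRenderPass(framebuffer, region);
//   draw.DrawQuad(...);
//   draw.EndRenderPass();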
class UtilityShaderDraw
{
public:
UtilityShaderDraw(VkCommandBuffer command_buffer, VkPipelineLayout pipeline_layout,
VkRenderPass render_pass, VkShaderModule vertex_shader,
VkShaderModule geometry_shader, VkShaderModule pixel_shader);
UtilityShaderVertex* ReserveVertices(VkPrimitiveTopology topology, size_t count);
void CommitVertices(size_t count);
void UploadVertices(VkPrimitiveTopology topology, UtilityShaderVertex* vertices, size_t count);
u8* AllocateVSUniforms(size_t size);
void CommitVSUniforms(size_t size);
u8* AllocatePSUniforms(size_t size);
void CommitPSUniforms(size_t size);
void SetPushConstants(const void* data, size_t data_size);
void SetPSSampler(size_t index, VkImageView view, VkSampler sampler);
void SetRasterizationState(const RasterizationState& state);
void SetDepthStencilState(const DepthStencilState& state);
void SetBlendState(const BlendState& state);
void BeginRenderPass(VkFramebuffer framebuffer, const VkRect2D& region,
const VkClearValue* clear_value = nullptr);
void EndRenderPass();
void Draw();
// NOTE: These methods alter the viewport state of the command buffer.
// Sets texture coordinates to 0..1
void DrawQuad(int x, int y, int width, int height, float z = 0.0f);
// Sets texture coordinates to the specified range
void DrawQuad(int dst_x, int dst_y, int dst_width, int dst_height, int src_x, int src_y,
int src_layer, int src_width, int src_height, int src_full_width,
int src_full_height, float z = 0.0f);
void DrawColoredQuad(int x, int y, int width, int height, u32 color, float z = 0.0f);
void DrawColoredQuad(int x, int y, int width, int height, float r, float g, float b, float a,
float z = 0.0f);
// Draw without a vertex buffer. Assumes viewport has been initialized separately.
void SetViewportAndScissor(int x, int y, int width, int height);
void DrawWithoutVertexBuffer(VkPrimitiveTopology primitive_topology, u32 vertex_count);
private:
void BindVertexBuffer();
void BindDescriptors();
bool BindPipeline();
VkCommandBuffer m_command_buffer = VK_NULL_HANDLE;
VkBuffer m_vertex_buffer = VK_NULL_HANDLE;
VkDeviceSize m_vertex_buffer_offset = 0;
uint32_t m_vertex_count = 0;
VkDescriptorBufferInfo m_vs_uniform_buffer = {};
VkDescriptorBufferInfo m_ps_uniform_buffer = {};
std::array<uint32_t, NUM_UBO_DESCRIPTOR_SET_BINDINGS> m_ubo_offsets = {};
std::array<VkDescriptorImageInfo, NUM_PIXEL_SHADER_SAMPLERS> m_ps_samplers = {};
PipelineInfo m_pipeline_info = {};
};
} // namespace Vulkan

View File

@ -0,0 +1,131 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include "Common/Assert.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/ObjectCache.h"
#include "VideoBackends/Vulkan/VertexFormat.h"
#include "VideoCommon/CPMemory.h"
#include "VideoCommon/VertexLoaderManager.h"
#include "VideoCommon/VertexShaderGen.h"
namespace Vulkan
{
static VkFormat VarToVkFormat(VarType t, uint32_t components, bool integer)
{
static const VkFormat float_type_lookup[][4] = {
{VK_FORMAT_R8_UNORM, VK_FORMAT_R8G8_UNORM, VK_FORMAT_R8G8B8_UNORM,
VK_FORMAT_R8G8B8A8_UNORM}, // VAR_UNSIGNED_BYTE
{VK_FORMAT_R8_SNORM, VK_FORMAT_R8G8_SNORM, VK_FORMAT_R8G8B8_SNORM,
VK_FORMAT_R8G8B8A8_SNORM}, // VAR_BYTE
{VK_FORMAT_R16_UNORM, VK_FORMAT_R16G16_UNORM, VK_FORMAT_R16G16B16_UNORM,
VK_FORMAT_R16G16B16A16_UNORM}, // VAR_UNSIGNED_SHORT
{VK_FORMAT_R16_SNORM, VK_FORMAT_R16G16_SNORM, VK_FORMAT_R16G16B16_SNORM,
VK_FORMAT_R16G16B16A16_SNORM}, // VAR_SHORT
{VK_FORMAT_R32_SFLOAT, VK_FORMAT_R32G32_SFLOAT, VK_FORMAT_R32G32B32_SFLOAT,
VK_FORMAT_R32G32B32A32_SFLOAT} // VAR_FLOAT
};
static const VkFormat integer_type_lookup[][4] = {
{VK_FORMAT_R8_UINT, VK_FORMAT_R8G8_UINT, VK_FORMAT_R8G8B8_UINT,
VK_FORMAT_R8G8B8A8_UINT}, // VAR_UNSIGNED_BYTE
{VK_FORMAT_R8_SINT, VK_FORMAT_R8G8_SINT, VK_FORMAT_R8G8B8_SINT,
VK_FORMAT_R8G8B8A8_SINT}, // VAR_BYTE
{VK_FORMAT_R16_UINT, VK_FORMAT_R16G16_UINT, VK_FORMAT_R16G16B16_UINT,
VK_FORMAT_R16G16B16A16_UINT}, // VAR_UNSIGNED_SHORT
{VK_FORMAT_R16_SINT, VK_FORMAT_R16G16_SINT, VK_FORMAT_R16G16B16_SINT,
VK_FORMAT_R16G16B16A16_SINT}, // VAR_SHORT
{VK_FORMAT_R32_SFLOAT, VK_FORMAT_R32G32_SFLOAT, VK_FORMAT_R32G32B32_SFLOAT,
VK_FORMAT_R32G32B32A32_SFLOAT} // VAR_FLOAT
};
_assert_(components > 0 && components <= 4);
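// e.g. (VAR_FLOAT, 3, false) maps to VK_FORMAT_R32G32B32_SFLOAT.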
return integer ? integer_type_lookup[t][components - 1] : float_type_lookup[t][components - 1];
}
VertexFormat::VertexFormat(const PortableVertexDeclaration& in_vtx_decl)
{
vtx_decl = in_vtx_decl;
MapAttributes();
SetupInputState();
}
void VertexFormat::MapAttributes()
{
m_num_attributes = 0;
if (vtx_decl.position.enable)
AddAttribute(SHADER_POSITION_ATTRIB, 0,
VarToVkFormat(vtx_decl.position.type, vtx_decl.position.components,
vtx_decl.position.integer),
vtx_decl.position.offset);
for (uint32_t i = 0; i < 3; i++)
{
if (vtx_decl.normals[i].enable)
AddAttribute(SHADER_NORM0_ATTRIB + i, 0,
VarToVkFormat(vtx_decl.normals[i].type, vtx_decl.normals[i].components,
vtx_decl.normals[i].integer),
vtx_decl.normals[i].offset);
}
for (uint32_t i = 0; i < 2; i++)
{
if (vtx_decl.colors[i].enable)
AddAttribute(SHADER_COLOR0_ATTRIB + i, 0,
VarToVkFormat(vtx_decl.colors[i].type, vtx_decl.colors[i].components,
vtx_decl.colors[i].integer),
vtx_decl.colors[i].offset);
}
for (uint32_t i = 0; i < 8; i++)
{
if (vtx_decl.texcoords[i].enable)
AddAttribute(SHADER_TEXTURE0_ATTRIB + i, 0,
VarToVkFormat(vtx_decl.texcoords[i].type, vtx_decl.texcoords[i].components,
vtx_decl.texcoords[i].integer),
vtx_decl.texcoords[i].offset);
}
if (vtx_decl.posmtx.enable)
AddAttribute(
SHADER_POSMTX_ATTRIB, 0,
VarToVkFormat(vtx_decl.posmtx.type, vtx_decl.posmtx.components, vtx_decl.posmtx.integer),
vtx_decl.posmtx.offset);
}
void VertexFormat::SetupInputState()
{
m_binding_description.binding = 0;
m_binding_description.stride = vtx_decl.stride;
m_binding_description.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
m_input_state_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
m_input_state_info.pNext = nullptr;
m_input_state_info.flags = 0;
m_input_state_info.vertexBindingDescriptionCount = 1;
m_input_state_info.pVertexBindingDescriptions = &m_binding_description;
m_input_state_info.vertexAttributeDescriptionCount = m_num_attributes;
m_input_state_info.pVertexAttributeDescriptions = m_attribute_descriptions.data();
}
void VertexFormat::AddAttribute(uint32_t location, uint32_t binding, VkFormat format,
uint32_t offset)
{
_assert_(m_num_attributes < MAX_VERTEX_ATTRIBUTES);
m_attribute_descriptions[m_num_attributes].location = location;
m_attribute_descriptions[m_num_attributes].binding = binding;
m_attribute_descriptions[m_num_attributes].format = format;
m_attribute_descriptions[m_num_attributes].offset = offset;
m_num_attributes++;
}
void VertexFormat::SetupVertexPointers()
{
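// No-op: the input layout is baked into the pipeline via GetVertexInputStateInfo().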
}
} // namespace Vulkan

View File

@ -0,0 +1,44 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <array>
#include "VideoBackends/Vulkan/Constants.h"
#include "VideoCommon/NativeVertexFormat.h"
namespace Vulkan
{
class VertexFormat : public ::NativeVertexFormat
{
public:
VertexFormat(const PortableVertexDeclaration& in_vtx_decl);
// Passed to pipeline state creation
const VkPipelineVertexInputStateCreateInfo& GetVertexInputStateInfo() const
{
return m_input_state_info;
}
// Converting PortableVertexDeclaration -> Vulkan types
void MapAttributes();
void SetupInputState();
// Not used in the Vulkan backend.
void SetupVertexPointers() override;
private:
void AddAttribute(uint32_t location, uint32_t binding, VkFormat format, uint32_t offset);
VkVertexInputBindingDescription m_binding_description = {};
std::array<VkVertexInputAttributeDescription, MAX_VERTEX_ATTRIBUTES> m_attribute_descriptions =
{};
VkPipelineVertexInputStateCreateInfo m_input_state_info = {};
uint32_t m_num_attributes = 0;
};
}

View File

@ -0,0 +1,218 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include "VideoBackends/Vulkan/VertexManager.h"
#include "VideoBackends/Vulkan/BoundingBox.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/FramebufferManager.h"
#include "VideoBackends/Vulkan/Renderer.h"
#include "VideoBackends/Vulkan/StateTracker.h"
#include "VideoBackends/Vulkan/StreamBuffer.h"
#include "VideoBackends/Vulkan/Util.h"
#include "VideoBackends/Vulkan/VertexFormat.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
#include "VideoCommon/BoundingBox.h"
#include "VideoCommon/IndexGenerator.h"
#include "VideoCommon/Statistics.h"
#include "VideoCommon/VertexLoaderManager.h"
#include "VideoCommon/VideoConfig.h"
namespace Vulkan
{
// TODO: Clean up this mess
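// The stream buffers below are created at the initial size and may grow up to the maximum size;
// if a reservation still fails after executing the pending command buffer, the draw cannot
// proceed (see ResetBuffer).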
constexpr size_t INITIAL_VERTEX_BUFFER_SIZE = VertexManager::MAXVBUFFERSIZE * 2;
constexpr size_t MAX_VERTEX_BUFFER_SIZE = VertexManager::MAXVBUFFERSIZE * 16;
constexpr size_t INITIAL_INDEX_BUFFER_SIZE = VertexManager::MAXIBUFFERSIZE * sizeof(u16) * 2;
constexpr size_t MAX_INDEX_BUFFER_SIZE = VertexManager::MAXIBUFFERSIZE * sizeof(u16) * 16;
VertexManager::VertexManager()
: m_cpu_vertex_buffer(MAXVBUFFERSIZE), m_cpu_index_buffer(MAXIBUFFERSIZE)
{
}
VertexManager::~VertexManager()
{
}
bool VertexManager::Initialize(StateTracker* state_tracker)
{
m_state_tracker = state_tracker;
m_vertex_stream_buffer = StreamBuffer::Create(VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
INITIAL_VERTEX_BUFFER_SIZE, MAX_VERTEX_BUFFER_SIZE);
m_index_stream_buffer = StreamBuffer::Create(VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
INITIAL_INDEX_BUFFER_SIZE, MAX_INDEX_BUFFER_SIZE);
if (!m_vertex_stream_buffer || !m_index_stream_buffer)
{
PanicAlert("Failed to allocate streaming buffers");
return false;
}
return true;
}
NativeVertexFormat*
VertexManager::CreateNativeVertexFormat(const PortableVertexDeclaration& vtx_decl)
{
return new VertexFormat(vtx_decl);
}
void VertexManager::PrepareDrawBuffers(u32 stride)
{
size_t vertex_data_size = IndexGenerator::GetNumVerts() * stride;
size_t index_data_size = IndexGenerator::GetIndexLen() * sizeof(u16);
m_vertex_stream_buffer->CommitMemory(vertex_data_size);
m_index_stream_buffer->CommitMemory(index_data_size);
ADDSTAT(stats.thisFrame.bytesVertexStreamed, static_cast<int>(vertex_data_size));
ADDSTAT(stats.thisFrame.bytesIndexStreamed, static_cast<int>(index_data_size));
m_state_tracker->SetVertexBuffer(m_vertex_stream_buffer->GetBuffer(), 0);
m_state_tracker->SetIndexBuffer(m_index_stream_buffer->GetBuffer(), 0, VK_INDEX_TYPE_UINT16);
}
void VertexManager::ResetBuffer(u32 stride)
{
if (m_cull_all)
{
// Not drawing on the GPU, so store in a heap buffer instead.
m_cur_buffer_pointer = m_base_buffer_pointer = m_cpu_vertex_buffer.data();
m_end_buffer_pointer = m_base_buffer_pointer + m_cpu_vertex_buffer.size();
IndexGenerator::Start(m_cpu_index_buffer.data());
return;
}
// Attempt to allocate from buffers
bool has_vbuffer_allocation = m_vertex_stream_buffer->ReserveMemory(MAXVBUFFERSIZE, stride);
bool has_ibuffer_allocation = m_index_stream_buffer->ReserveMemory(MAXIBUFFERSIZE, sizeof(u16));
if (!has_vbuffer_allocation || !has_ibuffer_allocation)
{
// Flush any pending commands first, so that we can wait on the fences
WARN_LOG(VIDEO, "Executing command list while waiting for space in vertex/index buffer");
Util::ExecuteCurrentCommandsAndRestoreState(m_state_tracker, false);
// Attempt to allocate again, this may cause a fence wait
if (!has_vbuffer_allocation)
has_vbuffer_allocation = m_vertex_stream_buffer->ReserveMemory(MAXVBUFFERSIZE, stride);
if (!has_ibuffer_allocation)
has_ibuffer_allocation = m_index_stream_buffer->ReserveMemory(MAXIBUFFERSIZE, sizeof(u16));
// If we still failed, that means the allocation was too large and will never succeed, so panic
if (!has_vbuffer_allocation || !has_ibuffer_allocation)
PanicAlert("Failed to allocate space in streaming buffers for pending draw");
}
// Update pointers
m_base_buffer_pointer = m_vertex_stream_buffer->GetHostPointer();
m_end_buffer_pointer = m_vertex_stream_buffer->GetCurrentHostPointer() + MAXVBUFFERSIZE;
m_cur_buffer_pointer = m_vertex_stream_buffer->GetCurrentHostPointer();
IndexGenerator::Start(reinterpret_cast<u16*>(m_index_stream_buffer->GetCurrentHostPointer()));
// Update base indices
m_current_draw_base_vertex =
static_cast<u32>(m_vertex_stream_buffer->GetCurrentOffset() / stride);
m_current_draw_base_index =
static_cast<u32>(m_index_stream_buffer->GetCurrentOffset() / sizeof(u16));
}
void VertexManager::vFlush(bool use_dst_alpha)
{
const VertexFormat* vertex_format =
static_cast<VertexFormat*>(VertexLoaderManager::GetCurrentVertexFormat());
u32 vertex_stride = vertex_format->GetVertexStride();
// Commit memory to device
PrepareDrawBuffers(vertex_stride);
// Figure out the number of indices to draw
u32 index_count = IndexGenerator::GetIndexLen();
// Update assembly state
m_state_tracker->SetVertexFormat(vertex_format);
switch (m_current_primitive_type)
{
case PRIMITIVE_POINTS:
m_state_tracker->SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_POINT_LIST);
m_state_tracker->DisableBackFaceCulling();
break;
case PRIMITIVE_LINES:
m_state_tracker->SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_LINE_LIST);
m_state_tracker->DisableBackFaceCulling();
break;
case PRIMITIVE_TRIANGLES:
m_state_tracker->SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP);
g_renderer->SetGenerationMode();
break;
}
// Can we do single-pass dst alpha?
DSTALPHA_MODE dstalpha_mode = DSTALPHA_NONE;
if (use_dst_alpha && g_vulkan_context->SupportsDualSourceBlend())
dstalpha_mode = DSTALPHA_DUAL_SOURCE_BLEND;
// Check for any shader stage changes
m_state_tracker->CheckForShaderChanges(m_current_primitive_type, dstalpha_mode);
// Update any changed constants
m_state_tracker->UpdateVertexShaderConstants();
m_state_tracker->UpdateGeometryShaderConstants();
m_state_tracker->UpdatePixelShaderConstants();
// Flush all EFB pokes and invalidate the peek cache.
// TODO: Cleaner way without the cast.
FramebufferManager* framebuffer_mgr =
static_cast<FramebufferManager*>(g_framebuffer_manager.get());
framebuffer_mgr->InvalidatePeekCache();
framebuffer_mgr->FlushEFBPokes(m_state_tracker);
// If bounding box is enabled, we need to flush any changes first, then invalidate what we have.
if (g_vulkan_context->SupportsBoundingBox())
{
BoundingBox* bounding_box = static_cast<Renderer*>(g_renderer.get())->GetBoundingBox();
bool bounding_box_enabled = (::BoundingBox::active && g_ActiveConfig.bBBoxEnable);
if (bounding_box_enabled)
{
bounding_box->Flush(m_state_tracker);
bounding_box->Invalidate(m_state_tracker);
}
// Update which descriptor set/pipeline layout to use.
m_state_tracker->SetBBoxEnable(bounding_box_enabled);
}
// Bind all pending state to the command buffer
if (!m_state_tracker->Bind())
{
WARN_LOG(VIDEO, "Skipped draw of %u indices", index_count);
return;
}
// Execute the draw
vkCmdDrawIndexed(g_command_buffer_mgr->GetCurrentCommandBuffer(), index_count, 1,
m_current_draw_base_index, m_current_draw_base_vertex, 0);
// If we can't do single pass dst alpha, we now need to draw the alpha pass.
if (use_dst_alpha && !g_vulkan_context->SupportsDualSourceBlend())
{
m_state_tracker->CheckForShaderChanges(m_current_primitive_type, DSTALPHA_ALPHA_PASS);
if (!m_state_tracker->Bind())
{
WARN_LOG(VIDEO, "Skipped draw of %u indices (alpha pass)", index_count);
return;
}
vkCmdDrawIndexed(g_command_buffer_mgr->GetCurrentCommandBuffer(), index_count, 1,
m_current_draw_base_index, m_current_draw_base_vertex, 0);
}
m_state_tracker->OnDraw();
}
} // namespace Vulkan

View File

@ -0,0 +1,44 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include "VideoCommon/VertexManagerBase.h"
namespace Vulkan
{
class StateTracker;
class StreamBuffer;
class VertexManager : public VertexManagerBase
{
public:
VertexManager();
~VertexManager();
bool Initialize(StateTracker* state_tracker);
NativeVertexFormat* CreateNativeVertexFormat(const PortableVertexDeclaration& vtx_decl) override;
protected:
void PrepareDrawBuffers(u32 stride);
void ResetBuffer(u32 stride) override;
private:
void vFlush(bool use_dst_alpha) override;
StateTracker* m_state_tracker = nullptr;
std::vector<u8> m_cpu_vertex_buffer;
std::vector<u16> m_cpu_index_buffer;
std::unique_ptr<StreamBuffer> m_vertex_stream_buffer;
std::unique_ptr<StreamBuffer> m_index_stream_buffer;
u32 m_current_draw_base_vertex = 0;
u32 m_current_draw_base_index = 0;
};
} // namespace Vulkan

View File

@ -0,0 +1,25 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include "VideoCommon/VideoBackendBase.h"
namespace Vulkan
{
class VideoBackend : public VideoBackendBase
{
bool Initialize(void* window_handle) override;
void Shutdown() override;
std::string GetName() const override { return "Vulkan"; }
std::string GetDisplayName() const override { return "Vulkan (experimental)"; }
void Video_Prepare() override;
void Video_Cleanup() override;
void InitBackendInfo() override;
unsigned int PeekMessages() override { return 0; }
};
} // namespace Vulkan

View File

@ -0,0 +1,119 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{29F29A19-F141-45AD-9679-5A2923B49DA3}</ProjectGuid>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<PlatformToolset>v140</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)'=='Debug'" Label="Configuration">
<UseDebugLibraries>true</UseDebugLibraries>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)'=='Release'" Label="Configuration">
<UseDebugLibraries>false</UseDebugLibraries>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="..\..\..\VSProps\Base.props" />
<Import Project="..\..\..\VSProps\PCHUse.props" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<AdditionalIncludeDirectories>$(ExternalsDir)Vulkan\include;$(ExternalsDir)glslang\glslang\Public;$(ExternalsDir)glslang\SPIRV;$(ExternalsDir)zlib;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Lib />
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<AdditionalIncludeDirectories>$(ExternalsDir)Vulkan\include;$(ExternalsDir)glslang\glslang\Public;$(ExternalsDir)glslang\SPIRV;$(ExternalsDir)zlib;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ClCompile>
<Lib />
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="BoundingBox.cpp" />
<ClCompile Include="CommandBufferManager.cpp" />
<ClCompile Include="FramebufferManager.cpp" />
<ClCompile Include="main.cpp" />
<ClCompile Include="PaletteTextureConverter.cpp" />
<ClCompile Include="PerfQuery.cpp" />
<ClCompile Include="RasterFont.cpp" />
<ClCompile Include="StagingBuffer.cpp" />
<ClCompile Include="StagingTexture2D.cpp" />
<ClCompile Include="TextureEncoder.cpp" />
<ClCompile Include="Util.cpp" />
<ClCompile Include="VertexFormat.cpp" />
<ClCompile Include="ObjectCache.cpp" />
<ClCompile Include="Renderer.cpp" />
<ClCompile Include="ShaderCompiler.cpp" />
<ClCompile Include="StateTracker.cpp" />
<ClCompile Include="StreamBuffer.cpp" />
<ClCompile Include="SwapChain.cpp" />
<ClCompile Include="Texture2D.cpp" />
<ClCompile Include="TextureCache.cpp" />
<ClCompile Include="VertexManager.cpp" />
<ClCompile Include="VulkanContext.cpp" />
<ClCompile Include="VulkanLoader.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="BoundingBox.h" />
<ClInclude Include="CommandBufferManager.h" />
<ClInclude Include="FramebufferManager.h" />
<ClInclude Include="Constants.h" />
<ClInclude Include="PaletteTextureConverter.h" />
<ClInclude Include="RasterFont.h" />
<ClInclude Include="StagingBuffer.h" />
<ClInclude Include="StagingTexture2D.h" />
<ClInclude Include="TextureEncoder.h" />
<ClInclude Include="Util.h" />
<ClInclude Include="VertexFormat.h" />
<ClInclude Include="PerfQuery.h" />
<ClInclude Include="ObjectCache.h" />
<ClInclude Include="Renderer.h" />
<ClInclude Include="ShaderCompiler.h" />
<ClInclude Include="StateTracker.h" />
<ClInclude Include="StreamBuffer.h" />
<ClInclude Include="SwapChain.h" />
<ClInclude Include="Texture2D.h" />
<ClInclude Include="TextureCache.h" />
<ClInclude Include="VertexManager.h" />
<ClInclude Include="VideoBackend.h" />
<ClInclude Include="VulkanContext.h" />
<ClInclude Include="VulkanLoader.h" />
</ItemGroup>
<ItemGroup>
<Text Include="CMakeLists.txt" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="$(CoreDir)VideoCommon\VideoCommon.vcxproj">
<Project>{3de9ee35-3e91-4f27-a014-2866ad8c3fe3}</Project>
</ProjectReference>
<ProjectReference Include="..\..\..\..\Externals\glslang\glslang.vcxproj">
<Project>{d178061b-84d3-44f9-beed-efd18d9033f0}</Project>
</ProjectReference>
</ItemGroup>
<ItemGroup>
<None Include="VulkanEntryPoints.inl" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>

View File

@ -0,0 +1,719 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <algorithm>
#include "Common/Assert.h"
#include "Common/CommonFuncs.h"
#include "Common/Logging/Log.h"
#include "Common/MsgHandler.h"
#include "Common/StringUtil.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
#include "VideoCommon/DriverDetails.h"
namespace Vulkan
{
std::unique_ptr<VulkanContext> g_vulkan_context;
VulkanContext::VulkanContext(VkInstance instance, VkPhysicalDevice physical_device)
: m_instance(instance), m_physical_device(physical_device)
{
// Read device physical memory properties, we need it for allocating buffers
vkGetPhysicalDeviceProperties(physical_device, &m_device_properties);
vkGetPhysicalDeviceMemoryProperties(physical_device, &m_device_memory_properties);
// Would any drivers be this silly? I hope not...
m_device_properties.limits.minUniformBufferOffsetAlignment = std::max(
m_device_properties.limits.minUniformBufferOffsetAlignment, static_cast<VkDeviceSize>(1));
m_device_properties.limits.minTexelBufferOffsetAlignment = std::max(
m_device_properties.limits.minTexelBufferOffsetAlignment, static_cast<VkDeviceSize>(1));
m_device_properties.limits.optimalBufferCopyOffsetAlignment = std::max(
m_device_properties.limits.optimalBufferCopyOffsetAlignment, static_cast<VkDeviceSize>(1));
m_device_properties.limits.optimalBufferCopyRowPitchAlignment = std::max(
m_device_properties.limits.optimalBufferCopyRowPitchAlignment, static_cast<VkDeviceSize>(1));
}
VulkanContext::~VulkanContext()
{
if (m_device != VK_NULL_HANDLE)
vkDestroyDevice(m_device, nullptr);
if (m_debug_report_callback != VK_NULL_HANDLE)
DisableDebugReports();
vkDestroyInstance(m_instance, nullptr);
}
bool VulkanContext::CheckValidationLayerAvailability()
{
u32 extension_count = 0;
VkResult res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkEnumerateInstanceExtensionProperties failed: ");
return false;
}
std::vector<VkExtensionProperties> extension_list(extension_count);
res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, extension_list.data());
_assert_(res == VK_SUCCESS);
u32 layer_count = 0;
res = vkEnumerateInstanceLayerProperties(&layer_count, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkEnumerateInstanceExtensionProperties failed: ");
return false;
}
std::vector<VkLayerProperties> layer_list(layer_count);
res = vkEnumerateInstanceLayerProperties(&layer_count, layer_list.data());
_assert_(res == VK_SUCCESS);
// Check for both VK_EXT_debug_report and VK_LAYER_LUNARG_standard_validation
return (std::find_if(extension_list.begin(), extension_list.end(),
[](const auto& it) {
return strcmp(it.extensionName, VK_EXT_DEBUG_REPORT_EXTENSION_NAME) == 0;
}) != extension_list.end() &&
std::find_if(layer_list.begin(), layer_list.end(), [](const auto& it) {
return strcmp(it.layerName, "VK_LAYER_LUNARG_standard_validation") == 0;
}) != layer_list.end());
}
VkInstance VulkanContext::CreateVulkanInstance(bool enable_surface, bool enable_validation_layer)
{
ExtensionList enabled_extensions;
if (!SelectInstanceExtensions(&enabled_extensions, enable_surface, enable_validation_layer))
return VK_NULL_HANDLE;
VkApplicationInfo app_info = {};
app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
app_info.pNext = nullptr;
app_info.pApplicationName = "Dolphin Emulator";
app_info.applicationVersion = VK_MAKE_VERSION(5, 0, 0);
app_info.pEngineName = "Dolphin Emulator";
app_info.engineVersion = VK_MAKE_VERSION(5, 0, 0);
app_info.apiVersion = VK_MAKE_VERSION(1, 0, 0);
VkInstanceCreateInfo instance_create_info = {};
instance_create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
instance_create_info.pNext = nullptr;
instance_create_info.flags = 0;
instance_create_info.pApplicationInfo = &app_info;
instance_create_info.enabledExtensionCount = static_cast<uint32_t>(enabled_extensions.size());
instance_create_info.ppEnabledExtensionNames = enabled_extensions.data();
instance_create_info.enabledLayerCount = 0;
instance_create_info.ppEnabledLayerNames = nullptr;
// Enable debug layer on debug builds
if (enable_validation_layer)
{
static const char* layer_names[] = {"VK_LAYER_LUNARG_standard_validation"};
instance_create_info.enabledLayerCount = 1;
instance_create_info.ppEnabledLayerNames = layer_names;
}
VkInstance instance;
VkResult res = vkCreateInstance(&instance_create_info, nullptr, &instance);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateInstance failed: ");
return VK_NULL_HANDLE;
}
return instance;
}
bool VulkanContext::SelectInstanceExtensions(ExtensionList* extension_list, bool enable_surface,
bool enable_validation_layer)
{
u32 extension_count = 0;
VkResult res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkEnumerateInstanceExtensionProperties failed: ");
return false;
}
if (extension_count == 0)
{
ERROR_LOG(VIDEO, "Vulkan: No extensions supported by instance.");
return false;
}
std::vector<VkExtensionProperties> available_extension_list(extension_count);
res = vkEnumerateInstanceExtensionProperties(nullptr, &extension_count,
available_extension_list.data());
_assert_(res == VK_SUCCESS);
for (const auto& extension_properties : available_extension_list)
INFO_LOG(VIDEO, "Available extension: %s", extension_properties.extensionName);
auto CheckForExtension = [&](const char* name, bool required) -> bool {
if (std::find_if(available_extension_list.begin(), available_extension_list.end(),
[&](const VkExtensionProperties& properties) {
return !strcmp(name, properties.extensionName);
}) != available_extension_list.end())
{
INFO_LOG(VIDEO, "Enabling extension: %s", name);
extension_list->push_back(name);
return true;
}
if (required)
{
ERROR_LOG(VIDEO, "Vulkan: Missing required extension %s.", name);
return false;
}
return true;
};
// Common extensions
if (enable_surface && !CheckForExtension(VK_KHR_SURFACE_EXTENSION_NAME, true))
{
return false;
}
#if defined(VK_USE_PLATFORM_WIN32_KHR)
if (enable_surface && !CheckForExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME, true))
return false;
#elif defined(VK_USE_PLATFORM_XLIB_KHR)
if (enable_surface && !CheckForExtension(VK_KHR_XLIB_SURFACE_EXTENSION_NAME, true))
return false;
#elif defined(VK_USE_PLATFORM_XCB_KHR)
if (enable_surface && !CheckForExtension(VK_KHR_XCB_SURFACE_EXTENSION_NAME, true))
return false;
#elif defined(VK_USE_PLATFORM_ANDROID_KHR)
if (enable_surface && !CheckForExtension(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME, true))
return false;
#endif
// VK_EXT_debug_report
if (enable_validation_layer && !CheckForExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME, true))
return false;
return true;
}
VulkanContext::GPUList VulkanContext::EnumerateGPUs(VkInstance instance)
{
u32 gpu_count = 0;
VkResult res = vkEnumeratePhysicalDevices(instance, &gpu_count, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkEnumeratePhysicalDevices failed: ");
return {};
}
GPUList gpus;
gpus.resize(gpu_count);
res = vkEnumeratePhysicalDevices(instance, &gpu_count, gpus.data());
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkEnumeratePhysicalDevices failed: ");
return {};
}
return gpus;
}
void VulkanContext::PopulateBackendInfo(VideoConfig* config)
{
config->backend_info.api_type = APIType::Vulkan;
config->backend_info.bSupportsExclusiveFullscreen = false; // Currently WSI does not allow this.
config->backend_info.bSupports3DVision = false; // D3D-exclusive.
config->backend_info.bSupportsOversizedViewports = true; // Assumed support.
config->backend_info.bSupportsEarlyZ = true; // Assumed support.
config->backend_info.bSupportsPrimitiveRestart = true; // Assumed support.
config->backend_info.bSupportsBindingLayout = false; // Assumed unsupported.
config->backend_info.bSupportsPaletteConversion = true; // Assumed support.
config->backend_info.bSupportsClipControl = true; // Assumed support.
config->backend_info.bSupportsMultithreading = true; // Assumed support.
config->backend_info.bSupportsPostProcessing = false; // No support yet.
config->backend_info.bSupportsDualSourceBlend = false; // Dependent on features.
config->backend_info.bSupportsGeometryShaders = false; // Dependent on features.
config->backend_info.bSupportsGSInstancing = false; // Dependent on features.
config->backend_info.bSupportsBBox = false; // Dependent on features.
config->backend_info.bSupportsSSAA = false; // Dependent on features.
config->backend_info.bSupportsDepthClamp = false; // Dependent on features.
config->backend_info.bSupportsReversedDepthRange = false; // No support yet due to driver bugs.
}
void VulkanContext::PopulateBackendInfoAdapters(VideoConfig* config, const GPUList& gpu_list)
{
config->backend_info.Adapters.clear();
for (VkPhysicalDevice physical_device : gpu_list)
{
VkPhysicalDeviceProperties properties;
vkGetPhysicalDeviceProperties(physical_device, &properties);
config->backend_info.Adapters.push_back(properties.deviceName);
}
}
void VulkanContext::PopulateBackendInfoFeatures(VideoConfig* config, VkPhysicalDevice gpu,
const VkPhysicalDeviceFeatures& features)
{
config->backend_info.bSupportsDualSourceBlend = (features.dualSrcBlend == VK_TRUE);
config->backend_info.bSupportsGeometryShaders = (features.geometryShader == VK_TRUE);
config->backend_info.bSupportsGSInstancing = (features.geometryShader == VK_TRUE);
config->backend_info.bSupportsBBox = (features.fragmentStoresAndAtomics == VK_TRUE);
config->backend_info.bSupportsSSAA = (features.sampleRateShading == VK_TRUE);
// Disable geometry shader when shaderTessellationAndGeometryPointSize is not supported.
// Seems this is needed for gl_Layer.
if (!features.shaderTessellationAndGeometryPointSize)
config->backend_info.bSupportsGeometryShaders = false;
// TODO: Investigate if there's a feature we can enable for GS instancing.
config->backend_info.bSupportsGSInstancing = false;
// Depth clamping implies shaderClipDistance and depthClamp
config->backend_info.bSupportsDepthClamp =
(features.depthClamp == VK_TRUE && features.shaderClipDistance == VK_TRUE);
}
void VulkanContext::PopulateBackendInfoMultisampleModes(
VideoConfig* config, VkPhysicalDevice gpu, const VkPhysicalDeviceProperties& properties)
{
// Query image support for the EFB texture formats.
VkImageFormatProperties efb_color_properties = {};
vkGetPhysicalDeviceImageFormatProperties(
gpu, EFB_COLOR_TEXTURE_FORMAT, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, 0, &efb_color_properties);
VkImageFormatProperties efb_depth_properties = {};
vkGetPhysicalDeviceImageFormatProperties(
gpu, EFB_DEPTH_TEXTURE_FORMAT, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, 0, &efb_depth_properties);
// We can only support MSAA if it's supported on our render target formats.
VkSampleCountFlags supported_sample_counts = properties.limits.framebufferColorSampleCounts &
properties.limits.framebufferDepthSampleCounts &
efb_color_properties.sampleCounts &
efb_depth_properties.sampleCounts;
// No AA
config->backend_info.AAModes.clear();
config->backend_info.AAModes.emplace_back(1);
// 2xMSAA/SSAA
if (supported_sample_counts & VK_SAMPLE_COUNT_2_BIT)
config->backend_info.AAModes.emplace_back(2);
// 4xMSAA/SSAA
if (supported_sample_counts & VK_SAMPLE_COUNT_4_BIT)
config->backend_info.AAModes.emplace_back(4);
// 8xMSAA/SSAA
if (supported_sample_counts & VK_SAMPLE_COUNT_8_BIT)
config->backend_info.AAModes.emplace_back(8);
// 16xMSAA/SSAA
if (supported_sample_counts & VK_SAMPLE_COUNT_16_BIT)
config->backend_info.AAModes.emplace_back(16);
// 32xMSAA/SSAA
if (supported_sample_counts & VK_SAMPLE_COUNT_32_BIT)
config->backend_info.AAModes.emplace_back(32);
// 64xMSAA/SSAA
if (supported_sample_counts & VK_SAMPLE_COUNT_64_BIT)
config->backend_info.AAModes.emplace_back(64);
}
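// Worked example (hypothetical device): framebufferColorSampleCounts = 0x0F (1/2/4/8x)
// ANDed with framebufferDepthSampleCounts = 0x07 and EFB format masks of 0x07 gives
// supported_sample_counts = 0x07, so AAModes ends up as {1, 2, 4}.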
std::unique_ptr<VulkanContext> VulkanContext::Create(VkInstance instance, VkPhysicalDevice gpu,
VkSurfaceKHR surface, VideoConfig* config,
bool enable_validation_layer)
{
std::unique_ptr<VulkanContext> context = std::make_unique<VulkanContext>(instance, gpu);
// Initialize DriverDetails so that we can check for bugs to disable features if needed.
DriverDetails::Init(DriverDetails::API_VULKAN,
DriverDetails::TranslatePCIVendorID(context->m_device_properties.vendorID),
DriverDetails::DRIVER_UNKNOWN,
static_cast<double>(context->m_device_properties.driverVersion),
DriverDetails::Family::UNKNOWN);
// Enable debug reports if validation layer is enabled.
if (enable_validation_layer)
context->EnableDebugReports();
// Attempt to create the device.
if (!context->CreateDevice(surface, enable_validation_layer))
{
// Since we are destroying the instance, we're also responsible for destroying the surface.
if (surface != VK_NULL_HANDLE)
vkDestroySurfaceKHR(instance, surface, nullptr);
return nullptr;
}
// Update video config with features.
PopulateBackendInfoFeatures(config, gpu, context->m_device_features);
PopulateBackendInfoMultisampleModes(config, gpu, context->m_device_properties);
return context;
}
bool VulkanContext::SelectDeviceExtensions(ExtensionList* extension_list, bool enable_surface,
bool enable_validation_layer)
{
u32 extension_count = 0;
VkResult res =
vkEnumerateDeviceExtensionProperties(m_physical_device, nullptr, &extension_count, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkEnumerateDeviceExtensionProperties failed: ");
return false;
}
if (extension_count == 0)
{
ERROR_LOG(VIDEO, "Vulkan: No extensions supported by device.");
return false;
}
std::vector<VkExtensionProperties> available_extension_list(extension_count);
res = vkEnumerateDeviceExtensionProperties(m_physical_device, nullptr, &extension_count,
available_extension_list.data());
_assert_(res == VK_SUCCESS);
for (const auto& extension_properties : available_extension_list)
INFO_LOG(VIDEO, "Available extension: %s", extension_properties.extensionName);
auto CheckForExtension = [&](const char* name, bool required) -> bool {
if (std::find_if(available_extension_list.begin(), available_extension_list.end(),
[&](const VkExtensionProperties& properties) {
return !strcmp(name, properties.extensionName);
}) != available_extension_list.end())
{
INFO_LOG(VIDEO, "Enabling extension: %s", name);
extension_list->push_back(name);
return true;
}
if (required)
{
ERROR_LOG(VIDEO, "Vulkan: Missing required extension %s.", name);
return false;
}
return true;
};
if (enable_surface && !CheckForExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME, true))
{
return false;
}
return true;
}
bool VulkanContext::SelectDeviceFeatures()
{
VkPhysicalDeviceProperties properties;
vkGetPhysicalDeviceProperties(m_physical_device, &properties);
VkPhysicalDeviceFeatures available_features;
vkGetPhysicalDeviceFeatures(m_physical_device, &available_features);
// Not having geometry shaders or wide lines will cause issues with rendering.
if (!available_features.geometryShader && !available_features.wideLines)
WARN_LOG(VIDEO, "Vulkan: Missing both geometryShader and wideLines features.");
if (!available_features.largePoints)
WARN_LOG(VIDEO, "Vulkan: Missing large points feature. CPU EFB writes will be slower.");
if (!available_features.occlusionQueryPrecise)
WARN_LOG(VIDEO, "Vulkan: Missing precise occlusion queries. Perf queries will be inaccurate.");
// Check push constant size.
if (properties.limits.maxPushConstantsSize < static_cast<u32>(PUSH_CONSTANT_BUFFER_SIZE))
{
PanicAlert("Vulkan: Push contant buffer size %u is below minimum %u.",
properties.limits.maxPushConstantsSize, static_cast<u32>(PUSH_CONSTANT_BUFFER_SIZE));
return false;
}
// Enable the features we use.
m_device_features.dualSrcBlend = available_features.dualSrcBlend;
m_device_features.geometryShader = available_features.geometryShader;
m_device_features.samplerAnisotropy = available_features.samplerAnisotropy;
m_device_features.logicOp = available_features.logicOp;
m_device_features.fragmentStoresAndAtomics = available_features.fragmentStoresAndAtomics;
m_device_features.sampleRateShading = available_features.sampleRateShading;
m_device_features.largePoints = available_features.largePoints;
m_device_features.shaderStorageImageMultisample =
available_features.shaderStorageImageMultisample;
m_device_features.shaderTessellationAndGeometryPointSize =
available_features.shaderTessellationAndGeometryPointSize;
m_device_features.occlusionQueryPrecise = available_features.occlusionQueryPrecise;
m_device_features.shaderClipDistance = available_features.shaderClipDistance;
m_device_features.depthClamp = available_features.depthClamp;
return true;
}
bool VulkanContext::CreateDevice(VkSurfaceKHR surface, bool enable_validation_layer)
{
u32 queue_family_count;
vkGetPhysicalDeviceQueueFamilyProperties(m_physical_device, &queue_family_count, nullptr);
if (queue_family_count == 0)
{
ERROR_LOG(VIDEO, "No queue families found on specified vulkan physical device.");
return false;
}
std::vector<VkQueueFamilyProperties> queue_family_properties(queue_family_count);
vkGetPhysicalDeviceQueueFamilyProperties(m_physical_device, &queue_family_count,
queue_family_properties.data());
INFO_LOG(VIDEO, "%u vulkan queue families", queue_family_count);
// Find a graphics queue
// Currently we only use a single queue for both graphics and presenting.
// TODO: In the future we could do post-processing and presenting on a different queue.
m_graphics_queue_family_index = queue_family_count;
for (uint32_t i = 0; i < queue_family_count; i++)
{
if (queue_family_properties[i].queueFlags & VK_QUEUE_GRAPHICS_BIT)
{
// Check that it can present to our surface from this queue
if (surface)
{
VkBool32 present_supported;
VkResult res =
vkGetPhysicalDeviceSurfaceSupportKHR(m_physical_device, i, surface, &present_supported);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceSupportKHR failed: ");
return false;
}
if (present_supported)
{
m_graphics_queue_family_index = i;
break;
}
}
else
{
// We don't need present, so any graphics queue will do.
m_graphics_queue_family_index = i;
break;
}
}
}
if (m_graphics_queue_family_index == queue_family_count)
{
ERROR_LOG(VIDEO, "Vulkan: Failed to find an acceptable graphics queue.");
return false;
}
VkDeviceCreateInfo device_info = {};
device_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_info.pNext = nullptr;
device_info.flags = 0;
static constexpr float queue_priorities[] = {1.0f};
VkDeviceQueueCreateInfo queue_info = {};
queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info.pNext = nullptr;
queue_info.flags = 0;
queue_info.queueFamilyIndex = m_graphics_queue_family_index;
queue_info.queueCount = 1;
queue_info.pQueuePriorities = queue_priorities;
device_info.queueCreateInfoCount = 1;
device_info.pQueueCreateInfos = &queue_info;
ExtensionList enabled_extensions;
if (!SelectDeviceExtensions(&enabled_extensions, (surface != VK_NULL_HANDLE), enable_validation_layer))
return false;
device_info.enabledLayerCount = 0;
device_info.ppEnabledLayerNames = nullptr;
device_info.enabledExtensionCount = static_cast<uint32_t>(enabled_extensions.size());
device_info.ppEnabledExtensionNames = enabled_extensions.data();
// Check for required features before creating.
if (!SelectDeviceFeatures())
return false;
device_info.pEnabledFeatures = &m_device_features;
// Enable debug layer on debug builds
if (enable_validation_layer)
{
static const char* layer_names[] = {"VK_LAYER_LUNARG_standard_validation"};
device_info.enabledLayerCount = 1;
device_info.ppEnabledLayerNames = layer_names;
}
VkResult res = vkCreateDevice(m_physical_device, &device_info, nullptr, &m_device);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateDevice failed: ");
return false;
}
// With the device created, we can fill the remaining entry points.
if (!LoadVulkanDeviceFunctions(m_device))
return false;
// Grab the graphics queue (only one we're using at this point).
vkGetDeviceQueue(m_device, m_graphics_queue_family_index, 0, &m_graphics_queue);
return true;
}
static VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(VkDebugReportFlagsEXT flags,
VkDebugReportObjectTypeEXT objectType,
uint64_t object, size_t location,
int32_t messageCode,
const char* pLayerPrefix,
const char* pMessage, void* pUserData)
{
std::string log_message =
StringFromFormat("Vulkan debug report: (%s) %s", pLayerPrefix ? pLayerPrefix : "", pMessage);
if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT)
GENERIC_LOG(LogTypes::HOST_GPU, LogTypes::LERROR, "%s", log_message.c_str())
else if (flags & (VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT))
GENERIC_LOG(LogTypes::HOST_GPU, LogTypes::LWARNING, "%s", log_message.c_str())
else if (flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)
GENERIC_LOG(LogTypes::HOST_GPU, LogTypes::LINFO, "%s", log_message.c_str())
else
GENERIC_LOG(LogTypes::HOST_GPU, LogTypes::LDEBUG, "%s", log_message.c_str())
return VK_FALSE;
}
bool VulkanContext::EnableDebugReports()
{
// Already enabled?
if (m_debug_report_callback != VK_NULL_HANDLE)
return true;
// Check for presence of the functions before calling
if (!vkCreateDebugReportCallbackEXT || !vkDestroyDebugReportCallbackEXT ||
!vkDebugReportMessageEXT)
{
return false;
}
VkDebugReportCallbackCreateInfoEXT callback_info = {
VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, nullptr,
VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT |
VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT | VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
VK_DEBUG_REPORT_DEBUG_BIT_EXT,
DebugReportCallback, nullptr};
VkResult res =
vkCreateDebugReportCallbackEXT(m_instance, &callback_info, nullptr, &m_debug_report_callback);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateDebugReportCallbackEXT failed: ");
return false;
}
return true;
}
void VulkanContext::DisableDebugReports()
{
if (m_debug_report_callback != VK_NULL_HANDLE)
{
vkDestroyDebugReportCallbackEXT(m_instance, m_debug_report_callback, nullptr);
m_debug_report_callback = VK_NULL_HANDLE;
}
}
bool VulkanContext::GetMemoryType(u32 bits, VkMemoryPropertyFlags properties, u32* out_type_index)
{
for (u32 i = 0; i < VK_MAX_MEMORY_TYPES; i++)
{
if ((bits & (1 << i)) != 0)
{
u32 supported = m_device_memory_properties.memoryTypes[i].propertyFlags & properties;
if (supported == properties)
{
*out_type_index = i;
return true;
}
}
}
return false;
}
u32 VulkanContext::GetMemoryType(u32 bits, VkMemoryPropertyFlags properties)
{
u32 type_index = VK_MAX_MEMORY_TYPES;
if (!GetMemoryType(bits, properties, &type_index))
PanicAlert("Unable to find memory type for %x:%x", bits, properties);
return type_index;
}
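// Typical call site (a hypothetical allocation sketch; only GetMemoryType comes from this
// class, the rest is standard Vulkan):
//
//   VkMemoryRequirements reqs;
//   vkGetBufferMemoryRequirements(device, buffer, &reqs);
//   VkMemoryAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, nullptr,
//                                      reqs.size,
//                                      g_vulkan_context->GetMemoryType(
//                                          reqs.memoryTypeBits,
//                                          VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)};
//   VkDeviceMemory memory;
//   vkAllocateMemory(device, &alloc_info, nullptr, &memory);
//   vkBindBufferMemory(device, buffer, memory, 0);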
u32 VulkanContext::GetUploadMemoryType(u32 bits, bool* is_coherent)
{
// Try for coherent memory first.
VkMemoryPropertyFlags flags =
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
u32 type_index;
if (!GetMemoryType(bits, flags, &type_index))
{
WARN_LOG(
VIDEO,
"Vulkan: Failed to find a coherent memory type for uploads, this will affect performance.");
// Try non-coherent memory.
flags &= ~VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
if (!GetMemoryType(bits, flags, &type_index))
{
// We shouldn't have any memory types that aren't host-visible.
PanicAlert("Unable to get memory type for upload.");
type_index = 0;
}
}
if (is_coherent)
*is_coherent = ((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0);
return type_index;
}
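// When the returned type is not host-coherent, writes through a mapped pointer must be
// flushed before the GPU can see them (a sketch; memory/offset/size are illustrative):
//
//   VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr,
//                                memory, offset, size};
//   vkFlushMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);
//
// The is_coherent output lets callers skip the flush on coherent heaps.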
u32 VulkanContext::GetReadbackMemoryType(u32 bits, bool* is_coherent, bool* is_cached)
{
// Try for cached and coherent memory first.
VkMemoryPropertyFlags flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_CACHED_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
u32 type_index;
if (!GetMemoryType(bits, flags, &type_index))
{
// For readbacks, caching is more important than coherency.
flags &= ~VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
if (!GetMemoryType(bits, flags, &type_index))
{
WARN_LOG(VIDEO, "Vulkan: Failed to find a cached memory type for readbacks, this will affect "
"performance.");
// Remove the cached bit as well.
flags &= ~VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
if (!GetMemoryType(bits, flags, &type_index))
{
// We shouldn't have any memory types that aren't host-visible.
PanicAlert("Unable to get memory type for upload.");
type_index = 0;
}
}
}
if (is_coherent)
*is_coherent = ((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0);
if (is_cached)
*is_cached = ((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0);
return type_index;
}
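// The readback mirror of the above: on a non-coherent type, the mapped range must be
// invalidated before the CPU reads data the GPU wrote, e.g. (illustrative range):
//
//   vkInvalidateMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &range);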
} // namespace Vulkan

View File

@ -0,0 +1,131 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include "VideoBackends/Vulkan/Constants.h"
#include "VideoCommon/VideoConfig.h"
namespace Vulkan
{
class VulkanContext
{
public:
VulkanContext(VkInstance instance, VkPhysicalDevice physical_device);
~VulkanContext();
// Determines if the Vulkan validation layer is available on the system.
static bool CheckValidationLayerAvailability();
// Helper method to create a Vulkan instance.
static VkInstance CreateVulkanInstance(bool enable_surface, bool enable_validation_layer);
// Returns a list of Vulkan-compatible GPUs.
using GPUList = std::vector<VkPhysicalDevice>;
static GPUList EnumerateGPUs(VkInstance instance);
// Populates backend/video config.
// These are public so that the backend info can be populated without creating a context.
static void PopulateBackendInfo(VideoConfig* config);
static void PopulateBackendInfoAdapters(VideoConfig* config, const GPUList& gpu_list);
static void PopulateBackendInfoFeatures(VideoConfig* config, VkPhysicalDevice gpu,
const VkPhysicalDeviceFeatures& features);
static void PopulateBackendInfoMultisampleModes(VideoConfig* config, VkPhysicalDevice gpu,
const VkPhysicalDeviceProperties& properties);
// Creates a Vulkan device context.
// This assumes that PopulateBackendInfo and PopulateBackendInfoAdapters have already
// been called for the specified VideoConfig.
static std::unique_ptr<VulkanContext> Create(VkInstance instance, VkPhysicalDevice gpu,
VkSurfaceKHR surface, VideoConfig* config,
bool enable_validation_layer);
// Enable/disable debug message reporting at runtime.
// In the future this could be hooked up to the Host GPU logging option.
bool EnableDebugReports();
void DisableDebugReports();
// Global state accessors
VkInstance GetVulkanInstance() const { return m_instance; }
VkPhysicalDevice GetPhysicalDevice() const { return m_physical_device; }
VkDevice GetDevice() const { return m_device; }
VkQueue GetGraphicsQueue() const { return m_graphics_queue; }
u32 GetGraphicsQueueFamilyIndex() const { return m_graphics_queue_family_index; }
const VkQueueFamilyProperties& GetGraphicsQueueProperties() const
{
return m_graphics_queue_properties;
}
const VkPhysicalDeviceMemoryProperties& GetDeviceMemoryProperties() const
{
return m_device_memory_properties;
}
const VkPhysicalDeviceProperties& GetDeviceProperties() const { return m_device_properties; }
const VkPhysicalDeviceFeatures& GetDeviceFeatures() const { return m_device_features; }
const VkPhysicalDeviceLimits& GetDeviceLimits() const { return m_device_properties.limits; }
// Support bits
bool SupportsAnisotropicFiltering() const
{
return m_device_features.samplerAnisotropy == VK_TRUE;
}
bool SupportsGeometryShaders() const { return m_device_features.geometryShader == VK_TRUE; }
bool SupportsDualSourceBlend() const { return m_device_features.dualSrcBlend == VK_TRUE; }
bool SupportsLogicOps() const { return m_device_features.logicOp == VK_TRUE; }
bool SupportsBoundingBox() const { return m_device_features.fragmentStoresAndAtomics == VK_TRUE; }
bool SupportsPreciseOcclusionQueries() const
{
return m_device_features.occlusionQueryPrecise == VK_TRUE;
}
// Helpers for getting constants
VkDeviceSize GetUniformBufferAlignment() const
{
return m_device_properties.limits.minUniformBufferOffsetAlignment;
}
VkDeviceSize GetTexelBufferAlignment() const
{
return m_device_properties.limits.minTexelBufferOffsetAlignment;
}
VkDeviceSize GetBufferImageGranularity() const
{
return m_device_properties.limits.bufferImageGranularity;
}
float GetMaxSamplerAnisotropy() const { return m_device_properties.limits.maxSamplerAnisotropy; }
// Finds a memory type index for the specified memory properties and the bits returned by
// vkGetImageMemoryRequirements
bool GetMemoryType(u32 bits, VkMemoryPropertyFlags properties, u32* out_type_index);
u32 GetMemoryType(u32 bits, VkMemoryPropertyFlags properties);
// Finds a memory type for upload or readback buffers.
u32 GetUploadMemoryType(u32 bits, bool* is_coherent = nullptr);
u32 GetReadbackMemoryType(u32 bits, bool* is_coherent = nullptr, bool* is_cached = nullptr);
private:
using ExtensionList = std::vector<const char*>;
static bool SelectInstanceExtensions(ExtensionList* extension_list, bool enable_surface,
bool enable_validation_layer);
bool SelectDeviceExtensions(ExtensionList* extension_list, bool enable_surface,
bool enable_validation_layer);
bool SelectDeviceFeatures();
bool CreateDevice(VkSurfaceKHR surface, bool enable_validation_layer);
VkInstance m_instance = VK_NULL_HANDLE;
VkPhysicalDevice m_physical_device = VK_NULL_HANDLE;
VkDevice m_device = VK_NULL_HANDLE;
VkQueue m_graphics_queue = VK_NULL_HANDLE;
u32 m_graphics_queue_family_index = 0;
VkQueueFamilyProperties m_graphics_queue_properties = {};
VkDebugReportCallbackEXT m_debug_report_callback = VK_NULL_HANDLE;
VkPhysicalDeviceFeatures m_device_features = {};
VkPhysicalDeviceProperties m_device_properties = {};
VkPhysicalDeviceMemoryProperties m_device_memory_properties = {};
};
extern std::unique_ptr<VulkanContext> g_vulkan_context;
} // namespace Vulkan

View File

@ -0,0 +1,197 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
// Expands the VULKAN_ENTRY_POINT macro for each function when this file is included.
// Parameters: Function name, is required
// VULKAN_MODULE_ENTRY_POINT is for functions in vulkan-1.dll
// VULKAN_INSTANCE_ENTRY_POINT is for instance-specific functions.
// VULKAN_DEVICE_ENTRY_POINT is for device-specific functions.
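// For example, VulkanLoader.cpp defines these macros as variable declarations before
// including this file, so VULKAN_MODULE_ENTRY_POINT(vkCreateInstance, true) expands to:
//
//   PFN_vkCreateInstance vkCreateInstance;
//
// and the Load*Functions() routines redefine the same macros to LoadFunction(...) calls.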
#ifdef VULKAN_MODULE_ENTRY_POINT
VULKAN_MODULE_ENTRY_POINT(vkCreateInstance, true)
VULKAN_MODULE_ENTRY_POINT(vkGetInstanceProcAddr, true)
VULKAN_MODULE_ENTRY_POINT(vkGetDeviceProcAddr, true)
VULKAN_MODULE_ENTRY_POINT(vkEnumerateInstanceExtensionProperties, true)
VULKAN_MODULE_ENTRY_POINT(vkEnumerateInstanceLayerProperties, true)
#endif // VULKAN_MODULE_ENTRY_POINT
#ifdef VULKAN_INSTANCE_ENTRY_POINT
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyInstance, true)
VULKAN_INSTANCE_ENTRY_POINT(vkEnumeratePhysicalDevices, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceFeatures, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceFormatProperties, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceImageFormatProperties, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceProperties, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceQueueFamilyProperties, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceMemoryProperties, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateDevice, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyDevice, true)
VULKAN_INSTANCE_ENTRY_POINT(vkEnumerateDeviceExtensionProperties, true)
VULKAN_INSTANCE_ENTRY_POINT(vkEnumerateDeviceLayerProperties, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetDeviceQueue, true)
VULKAN_INSTANCE_ENTRY_POINT(vkQueueSubmit, true)
VULKAN_INSTANCE_ENTRY_POINT(vkQueueWaitIdle, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDeviceWaitIdle, true)
VULKAN_INSTANCE_ENTRY_POINT(vkAllocateMemory, true)
VULKAN_INSTANCE_ENTRY_POINT(vkFreeMemory, true)
VULKAN_INSTANCE_ENTRY_POINT(vkMapMemory, true)
VULKAN_INSTANCE_ENTRY_POINT(vkUnmapMemory, true)
VULKAN_INSTANCE_ENTRY_POINT(vkFlushMappedMemoryRanges, true)
VULKAN_INSTANCE_ENTRY_POINT(vkInvalidateMappedMemoryRanges, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetDeviceMemoryCommitment, true)
VULKAN_INSTANCE_ENTRY_POINT(vkBindBufferMemory, true)
VULKAN_INSTANCE_ENTRY_POINT(vkBindImageMemory, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetBufferMemoryRequirements, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetImageMemoryRequirements, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetImageSparseMemoryRequirements, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceSparseImageFormatProperties, true)
VULKAN_INSTANCE_ENTRY_POINT(vkQueueBindSparse, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateFence, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyFence, true)
VULKAN_INSTANCE_ENTRY_POINT(vkResetFences, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetFenceStatus, true)
VULKAN_INSTANCE_ENTRY_POINT(vkWaitForFences, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateSemaphore, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroySemaphore, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateEvent, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyEvent, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetEventStatus, true)
VULKAN_INSTANCE_ENTRY_POINT(vkSetEvent, true)
VULKAN_INSTANCE_ENTRY_POINT(vkResetEvent, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateQueryPool, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyQueryPool, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetQueryPoolResults, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateBuffer, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyBuffer, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateBufferView, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyBufferView, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateImage, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyImage, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetImageSubresourceLayout, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateImageView, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyImageView, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateShaderModule, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyShaderModule, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreatePipelineCache, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyPipelineCache, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPipelineCacheData, true)
VULKAN_INSTANCE_ENTRY_POINT(vkMergePipelineCaches, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateGraphicsPipelines, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateComputePipelines, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyPipeline, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreatePipelineLayout, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyPipelineLayout, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateSampler, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroySampler, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateDescriptorSetLayout, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyDescriptorSetLayout, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateDescriptorPool, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyDescriptorPool, true)
VULKAN_INSTANCE_ENTRY_POINT(vkResetDescriptorPool, true)
VULKAN_INSTANCE_ENTRY_POINT(vkAllocateDescriptorSets, true)
VULKAN_INSTANCE_ENTRY_POINT(vkFreeDescriptorSets, true)
VULKAN_INSTANCE_ENTRY_POINT(vkUpdateDescriptorSets, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateFramebuffer, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyFramebuffer, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateRenderPass, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyRenderPass, true)
VULKAN_INSTANCE_ENTRY_POINT(vkGetRenderAreaGranularity, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateCommandPool, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyCommandPool, true)
VULKAN_INSTANCE_ENTRY_POINT(vkResetCommandPool, true)
VULKAN_INSTANCE_ENTRY_POINT(vkAllocateCommandBuffers, true)
VULKAN_INSTANCE_ENTRY_POINT(vkFreeCommandBuffers, true)
VULKAN_INSTANCE_ENTRY_POINT(vkBeginCommandBuffer, true)
VULKAN_INSTANCE_ENTRY_POINT(vkEndCommandBuffer, true)
VULKAN_INSTANCE_ENTRY_POINT(vkResetCommandBuffer, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdBindPipeline, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdSetViewport, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdSetScissor, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdSetLineWidth, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdSetDepthBias, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdSetBlendConstants, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdSetDepthBounds, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdSetStencilCompareMask, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdSetStencilWriteMask, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdSetStencilReference, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdBindDescriptorSets, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdBindIndexBuffer, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdBindVertexBuffers, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdDraw, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdDrawIndexed, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdDrawIndirect, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdDrawIndexedIndirect, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdDispatch, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdDispatchIndirect, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdCopyBuffer, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdCopyImage, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdBlitImage, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdCopyBufferToImage, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdCopyImageToBuffer, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdUpdateBuffer, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdFillBuffer, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdClearColorImage, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdClearDepthStencilImage, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdClearAttachments, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdResolveImage, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdSetEvent, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdResetEvent, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdWaitEvents, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdPipelineBarrier, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdBeginQuery, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdEndQuery, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdResetQueryPool, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdWriteTimestamp, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdCopyQueryPoolResults, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdPushConstants, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdBeginRenderPass, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdNextSubpass, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdEndRenderPass, true)
VULKAN_INSTANCE_ENTRY_POINT(vkCmdExecuteCommands, true)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroySurfaceKHR, false)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceSurfaceSupportKHR, false)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceSurfaceCapabilitiesKHR, false)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceSurfaceFormatsKHR, false)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceSurfacePresentModesKHR, false)
#if defined(VK_USE_PLATFORM_WIN32_KHR)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateWin32SurfaceKHR, false)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceWin32PresentationSupportKHR, false)
#elif defined(VK_USE_PLATFORM_XLIB_KHR)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateXlibSurfaceKHR, false)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceXlibPresentationSupportKHR, false)
#elif defined(VK_USE_PLATFORM_XCB_KHR)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateXcbSurfaceKHR, false)
VULKAN_INSTANCE_ENTRY_POINT(vkGetPhysicalDeviceXcbPresentationSupportKHR, false)
#elif defined(VK_USE_PLATFORM_ANDROID_KHR)
VULKAN_INSTANCE_ENTRY_POINT(vkCreateAndroidSurfaceKHR, false)
#endif
VULKAN_INSTANCE_ENTRY_POINT(vkCreateDebugReportCallbackEXT, false)
VULKAN_INSTANCE_ENTRY_POINT(vkDestroyDebugReportCallbackEXT, false)
VULKAN_INSTANCE_ENTRY_POINT(vkDebugReportMessageEXT, false)
#endif // VULKAN_INSTANCE_ENTRY_POINT
#ifdef VULKAN_DEVICE_ENTRY_POINT
VULKAN_DEVICE_ENTRY_POINT(vkCreateSwapchainKHR, false)
VULKAN_DEVICE_ENTRY_POINT(vkDestroySwapchainKHR, false)
VULKAN_DEVICE_ENTRY_POINT(vkGetSwapchainImagesKHR, false)
VULKAN_DEVICE_ENTRY_POINT(vkAcquireNextImageKHR, false)
VULKAN_DEVICE_ENTRY_POINT(vkQueuePresentKHR, false)
#endif // VULKAN_DEVICE_ENTRY_POINT

View File

@ -0,0 +1,317 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <atomic>
#include <cstdarg>
#include "Common/CommonFuncs.h"
#include "Common/Logging/Log.h"
#include "Common/StringUtil.h"
#include "VideoBackends/Vulkan/VulkanLoader.h"
#if defined(VK_USE_PLATFORM_WIN32_KHR)
#include <Windows.h>
#elif defined(VK_USE_PLATFORM_XLIB_KHR) || defined(VK_USE_PLATFORM_XCB_KHR) || \
defined(VK_USE_PLATFORM_ANDROID_KHR)
#include <dlfcn.h>
#endif
#define VULKAN_MODULE_ENTRY_POINT(name, required) PFN_##name name;
#define VULKAN_INSTANCE_ENTRY_POINT(name, required) PFN_##name name;
#define VULKAN_DEVICE_ENTRY_POINT(name, required) PFN_##name name;
#include "VideoBackends/Vulkan/VulkanEntryPoints.inl"
#undef VULKAN_DEVICE_ENTRY_POINT
#undef VULKAN_INSTANCE_ENTRY_POINT
#undef VULKAN_MODULE_ENTRY_POINT
namespace Vulkan
{
static void ResetVulkanLibraryFunctionPointers()
{
#define VULKAN_MODULE_ENTRY_POINT(name, required) name = nullptr;
#define VULKAN_INSTANCE_ENTRY_POINT(name, required) name = nullptr;
#define VULKAN_DEVICE_ENTRY_POINT(name, required) name = nullptr;
#include "VideoBackends/Vulkan/VulkanEntryPoints.inl"
#undef VULKAN_DEVICE_ENTRY_POINT
#undef VULKAN_INSTANCE_ENTRY_POINT
#undef VULKAN_MODULE_ENTRY_POINT
}
#if defined(VK_USE_PLATFORM_WIN32_KHR)
static HMODULE vulkan_module;
static std::atomic_int vulkan_module_ref_count = {0};
bool LoadVulkanLibrary()
{
// Not thread safe if a second thread calls the loader whilst the first is still in-progress.
if (vulkan_module)
{
vulkan_module_ref_count++;
return true;
}
vulkan_module = LoadLibraryA("vulkan-1.dll");
if (!vulkan_module)
{
ERROR_LOG(VIDEO, "Failed to load vulkan-1.dll");
return false;
}
bool required_functions_missing = false;
auto LoadFunction = [&](FARPROC* func_ptr, const char* name, bool is_required) {
*func_ptr = GetProcAddress(vulkan_module, name);
if (!(*func_ptr) && is_required)
{
ERROR_LOG(VIDEO, "Vulkan: Failed to load required module function %s", name);
required_functions_missing = true;
}
};
#define VULKAN_MODULE_ENTRY_POINT(name, required) \
LoadFunction(reinterpret_cast<FARPROC*>(&name), #name, required);
#include "VideoBackends/Vulkan/VulkanEntryPoints.inl"
#undef VULKAN_MODULE_ENTRY_POINT
if (required_functions_missing)
{
ResetVulkanLibraryFunctionPointers();
FreeLibrary(vulkan_module);
vulkan_module = nullptr;
return false;
}
vulkan_module_ref_count++;
return true;
}
void UnloadVulkanLibrary()
{
if ((--vulkan_module_ref_count) > 0)
return;
ResetVulkanLibraryFunctionPointers();
FreeLibrary(vulkan_module);
vulkan_module = nullptr;
}
#elif defined(VK_USE_PLATFORM_XLIB_KHR) || defined(VK_USE_PLATFORM_XCB_KHR) || \
defined(VK_USE_PLATFORM_ANDROID_KHR)
static void* vulkan_module;
static std::atomic_int vulkan_module_ref_count = {0};
bool LoadVulkanLibrary()
{
// Not thread safe if a second thread calls the loader whilst the first is still in-progress.
if (vulkan_module)
{
vulkan_module_ref_count++;
return true;
}
// Names of libraries to search. Desktop should use libvulkan.so.1 or libvulkan.so.
static const char* search_lib_names[] = {"libvulkan.so.1", "libvulkan.so"};
for (size_t i = 0; i < ArraySize(search_lib_names); i++)
{
vulkan_module = dlopen(search_lib_names[i], RTLD_NOW);
if (vulkan_module)
break;
}
if (!vulkan_module)
{
ERROR_LOG(VIDEO, "Failed to load or locate libvulkan.so");
return false;
}
bool required_functions_missing = false;
auto LoadFunction = [&](void** func_ptr, const char* name, bool is_required) {
*func_ptr = dlsym(vulkan_module, name);
if (!(*func_ptr) && is_required)
{
ERROR_LOG(VIDEO, "Vulkan: Failed to load required module function %s", name);
required_functions_missing = true;
}
};
#define VULKAN_MODULE_ENTRY_POINT(name, required) \
LoadFunction(reinterpret_cast<void**>(&name), #name, required);
#include "VideoBackends/Vulkan/VulkanEntryPoints.inl"
#undef VULKAN_MODULE_ENTRY_POINT
if (required_functions_missing)
{
ResetVulkanLibraryFunctionPointers();
dlclose(vulkan_module);
vulkan_module = nullptr;
return false;
}
vulkan_module_ref_count++;
return true;
}
void UnloadVulkanLibrary()
{
if ((--vulkan_module_ref_count) > 0)
return;
ResetVulkanLibraryFunctionPointers();
dlclose(vulkan_module);
vulkan_module = nullptr;
}
#else
//#warning Unknown platform, not compiling loader.
bool LoadVulkanLibrary()
{
return false;
}
void UnloadVulkanLibrary()
{
ResetVulkanLibraryFunctionPointers();
}
#endif
bool LoadVulkanInstanceFunctions(VkInstance instance)
{
bool required_functions_missing = false;
auto LoadFunction = [&](PFN_vkVoidFunction* func_ptr, const char* name, bool is_required) {
*func_ptr = vkGetInstanceProcAddr(instance, name);
if (!(*func_ptr) && is_required)
{
ERROR_LOG(VIDEO, "Vulkan: Failed to load required instance function %s", name);
required_functions_missing = true;
}
};
#define VULKAN_INSTANCE_ENTRY_POINT(name, required) \
LoadFunction(reinterpret_cast<PFN_vkVoidFunction*>(&name), #name, required);
#include "VideoBackends/Vulkan/VulkanEntryPoints.inl"
#undef VULKAN_INSTANCE_ENTRY_POINT
return !required_functions_missing;
}
bool LoadVulkanDeviceFunctions(VkDevice device)
{
bool required_functions_missing = false;
auto LoadFunction = [&](PFN_vkVoidFunction* func_ptr, const char* name, bool is_required) {
*func_ptr = vkGetDeviceProcAddr(device, name);
if (!(*func_ptr) && is_required)
{
ERROR_LOG(VIDEO, "Vulkan: Failed to load required device function %s", name);
required_functions_missing = true;
}
};
#define VULKAN_DEVICE_ENTRY_POINT(name, required) \
LoadFunction(reinterpret_cast<PFN_vkVoidFunction*>(&name), #name, required);
#include "VideoBackends/Vulkan/VulkanEntryPoints.inl"
#undef VULKAN_DEVICE_ENTRY_POINT
return !required_functions_missing;
}
const char* VkResultToString(VkResult res)
{
switch (res)
{
case VK_SUCCESS:
return "VK_SUCCESS";
case VK_NOT_READY:
return "VK_NOT_READY";
case VK_TIMEOUT:
return "VK_TIMEOUT";
case VK_EVENT_SET:
return "VK_EVENT_SET";
case VK_EVENT_RESET:
return "VK_EVENT_RESET";
case VK_INCOMPLETE:
return "VK_INCOMPLETE";
case VK_ERROR_OUT_OF_HOST_MEMORY:
return "VK_ERROR_OUT_OF_HOST_MEMORY";
case VK_ERROR_OUT_OF_DEVICE_MEMORY:
return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
case VK_ERROR_INITIALIZATION_FAILED:
return "VK_ERROR_INITIALIZATION_FAILED";
case VK_ERROR_DEVICE_LOST:
return "VK_ERROR_DEVICE_LOST";
case VK_ERROR_MEMORY_MAP_FAILED:
return "VK_ERROR_MEMORY_MAP_FAILED";
case VK_ERROR_LAYER_NOT_PRESENT:
return "VK_ERROR_LAYER_NOT_PRESENT";
case VK_ERROR_EXTENSION_NOT_PRESENT:
return "VK_ERROR_EXTENSION_NOT_PRESENT";
case VK_ERROR_FEATURE_NOT_PRESENT:
return "VK_ERROR_FEATURE_NOT_PRESENT";
case VK_ERROR_INCOMPATIBLE_DRIVER:
return "VK_ERROR_INCOMPATIBLE_DRIVER";
case VK_ERROR_TOO_MANY_OBJECTS:
return "VK_ERROR_TOO_MANY_OBJECTS";
case VK_ERROR_FORMAT_NOT_SUPPORTED:
return "VK_ERROR_FORMAT_NOT_SUPPORTED";
case VK_ERROR_SURFACE_LOST_KHR:
return "VK_ERROR_SURFACE_LOST_KHR";
case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
case VK_SUBOPTIMAL_KHR:
return "VK_SUBOPTIMAL_KHR";
case VK_ERROR_OUT_OF_DATE_KHR:
return "VK_ERROR_OUT_OF_DATE_KHR";
case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR:
return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR";
case VK_ERROR_VALIDATION_FAILED_EXT:
return "VK_ERROR_VALIDATION_FAILED_EXT";
case VK_ERROR_INVALID_SHADER_NV:
return "VK_ERROR_INVALID_SHADER_NV";
default:
return "UNKNOWN_VK_RESULT";
}
}
void LogVulkanResult(int level, const char* func_name, VkResult res, const char* msg, ...)
{
std::va_list ap;
va_start(ap, msg);
std::string real_msg = StringFromFormatV(msg, ap);
va_end(ap);
real_msg = StringFromFormat("(%s) %s (%d: %s)", func_name, real_msg.c_str(),
static_cast<int>(res), VkResultToString(res));
GENERIC_LOG(LogTypes::VIDEO, static_cast<LogTypes::LOG_LEVELS>(level), "%s", real_msg.c_str());
}
} // namespace Vulkan

View File

@ -0,0 +1,45 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#define VK_NO_PROTOTYPES
#if defined(WIN32)
#define VK_USE_PLATFORM_WIN32_KHR
#elif defined(HAVE_X11)
// Currently we're getting xlib handles passed to the backend.
// If this ever changes to xcb, it's a simple change here.
#define VK_USE_PLATFORM_XLIB_KHR
//#define VK_USE_PLATFORM_XCB_KHR
#elif defined(ANDROID)
#define VK_USE_PLATFORM_ANDROID_KHR
#else
//#warning Unknown platform
#endif
#include "vulkan/vulkan.h"
// We abuse the preprocessor here to only need to specify function names once.
#define VULKAN_MODULE_ENTRY_POINT(name, required) extern PFN_##name name;
#define VULKAN_INSTANCE_ENTRY_POINT(name, required) extern PFN_##name name;
#define VULKAN_DEVICE_ENTRY_POINT(name, required) extern PFN_##name name;
#include "VideoBackends/Vulkan/VulkanEntryPoints.inl"
#undef VULKAN_DEVICE_ENTRY_POINT
#undef VULKAN_INSTANCE_ENTRY_POINT
#undef VULKAN_MODULE_ENTRY_POINT
namespace Vulkan
{
bool LoadVulkanLibrary();
bool LoadVulkanInstanceFunctions(VkInstance instance);
bool LoadVulkanDeviceFunctions(VkDevice device);
void UnloadVulkanLibrary();
const char* VkResultToString(VkResult res);
void LogVulkanResult(int level, const char* func_name, VkResult res, const char* msg, ...);
#define LOG_VULKAN_ERROR(res, ...) LogVulkanResult(2, __FUNCTION__, res, __VA_ARGS__)
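// Example call site (hypothetical, but mirrors usage throughout this backend):
//
//   VkResult res = vkCreateFence(device, &fence_info, nullptr, &fence);
//   if (res != VK_SUCCESS)
//     LOG_VULKAN_ERROR(res, "vkCreateFence failed: ");
//
// which logs "(<caller>) vkCreateFence failed:  (<code>: VK_ERROR_...)" at error level (2).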
} // namespace Vulkan

View File

@ -0,0 +1,272 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include <vector>
#include "Core/Host.h"
#include "VideoBackends/Vulkan/CommandBufferManager.h"
#include "VideoBackends/Vulkan/Constants.h"
#include "VideoBackends/Vulkan/FramebufferManager.h"
#include "VideoBackends/Vulkan/ObjectCache.h"
#include "VideoBackends/Vulkan/PerfQuery.h"
#include "VideoBackends/Vulkan/Renderer.h"
#include "VideoBackends/Vulkan/StateTracker.h"
#include "VideoBackends/Vulkan/SwapChain.h"
#include "VideoBackends/Vulkan/TextureCache.h"
#include "VideoBackends/Vulkan/VertexManager.h"
#include "VideoBackends/Vulkan/VideoBackend.h"
#include "VideoBackends/Vulkan/VulkanContext.h"
#include "VideoCommon/DriverDetails.h"
#include "VideoCommon/OnScreenDisplay.h"
#include "VideoCommon/VideoBackendBase.h"
#include "VideoCommon/VideoConfig.h"
namespace Vulkan
{
void VideoBackend::InitBackendInfo()
{
VulkanContext::PopulateBackendInfo(&g_Config);
if (LoadVulkanLibrary())
{
VkInstance temp_instance = VulkanContext::CreateVulkanInstance(false, false);
if (temp_instance)
{
if (LoadVulkanInstanceFunctions(temp_instance))
{
VulkanContext::GPUList gpu_list = VulkanContext::EnumerateGPUs(temp_instance);
VulkanContext::PopulateBackendInfoAdapters(&g_Config, gpu_list);
if (!gpu_list.empty())
{
// Use the selected adapter, or fall back to the first, to fill out features.
size_t device_index = static_cast<size_t>(g_Config.iAdapter);
if (device_index >= gpu_list.size())
device_index = 0;
VkPhysicalDevice gpu = gpu_list[device_index];
VkPhysicalDeviceProperties properties;
vkGetPhysicalDeviceProperties(gpu, &properties);
VkPhysicalDeviceFeatures features;
vkGetPhysicalDeviceFeatures(gpu, &features);
VulkanContext::PopulateBackendInfoFeatures(&g_Config, gpu, features);
VulkanContext::PopulateBackendInfoMultisampleModes(&g_Config, gpu, properties);
}
}
vkDestroyInstance(temp_instance, nullptr);
}
else
{
PanicAlert("Failed to create Vulkan instance.");
}
UnloadVulkanLibrary();
}
else
{
PanicAlert("Failed to load Vulkan library.");
}
}
bool VideoBackend::Initialize(void* window_handle)
{
if (!LoadVulkanLibrary())
{
PanicAlert("Failed to load Vulkan library.");
return false;
}
// HACK: Use InitBackendInfo to populate backend features before device creation.
// Otherwise, features such as stereoscopy would be disabled when the config is
// validated, which happens before our device is created (settings control
// instance behavior), even if the device actually supports them.
InitBackendInfo();
InitializeShared();
// Check for the presence of the validation layer before trying to enable it
bool enable_validation_layer = g_Config.bEnableValidationLayer;
if (enable_validation_layer && !VulkanContext::CheckValidationLayerAvailablility())
{
WARN_LOG(VIDEO, "Validation layer requested but not available, disabling.");
enable_validation_layer = false;
}
// Create Vulkan instance, needed before we can create a surface.
bool enable_surface = (window_handle != nullptr);
VkInstance instance =
VulkanContext::CreateVulkanInstance(enable_surface, enable_validation_layer);
if (instance == VK_NULL_HANDLE)
{
PanicAlert("Failed to create Vulkan instance.");
UnloadVulkanLibrary();
ShutdownShared();
return false;
}
// Load instance function pointers
if (!LoadVulkanInstanceFunctions(instance))
{
PanicAlert("Failed to load Vulkan instance functions.");
vkDestroyInstance(instance, nullptr);
UnloadVulkanLibrary();
ShutdownShared();
return false;
}
// Create Vulkan surface
VkSurfaceKHR surface = VK_NULL_HANDLE;
if (enable_surface)
{
surface = SwapChain::CreateVulkanSurface(instance, window_handle);
if (surface == VK_NULL_HANDLE)
{
PanicAlert("Failed to create Vulkan surface.");
vkDestroyInstance(instance, nullptr);
UnloadVulkanLibrary();
ShutdownShared();
return false;
}
}
// Fill the adapter list, and check whether the user has selected an invalid device.
// For some reason, NVIDIA's driver crashes randomly if vkEnumeratePhysicalDevices
// is called after creating a device.
VulkanContext::GPUList gpu_list = VulkanContext::EnumerateGPUs(instance);
size_t selected_adapter_index = static_cast<size_t>(g_Config.iAdapter);
if (gpu_list.empty())
{
PanicAlert("No Vulkan physical devices available.");
if (surface != VK_NULL_HANDLE)
vkDestroySurfaceKHR(instance, surface, nullptr);
vkDestroyInstance(instance, nullptr);
UnloadVulkanLibrary();
ShutdownShared();
return false;
}
else if (selected_adapter_index >= gpu_list.size())
{
WARN_LOG(VIDEO, "Vulkan adapter index out of range, selecting first adapter.");
selected_adapter_index = 0;
}
// Pass ownership over to VulkanContext, and let it take care of everything.
g_vulkan_context = VulkanContext::Create(instance, gpu_list[selected_adapter_index], surface,
&g_Config, enable_validation_layer);
if (!g_vulkan_context)
{
PanicAlert("Failed to create Vulkan device");
UnloadVulkanLibrary();
ShutdownShared();
return false;
}
// Create command buffers. We do this separately because the other classes depend on it.
g_command_buffer_mgr = std::make_unique<CommandBufferManager>(g_Config.bBackendMultithreading);
if (!g_command_buffer_mgr->Initialize())
{
PanicAlert("Failed to create Vulkan command buffers");
g_command_buffer_mgr.reset();
g_vulkan_context.reset();
UnloadVulkanLibrary();
ShutdownShared();
return false;
}
// Create main wrapper instances.
g_object_cache = std::make_unique<ObjectCache>();
g_framebuffer_manager = std::make_unique<FramebufferManager>();
g_renderer = std::make_unique<Renderer>();
// Cast to our wrapper classes, so we can call the init methods.
Renderer* renderer = static_cast<Renderer*>(g_renderer.get());
FramebufferManager* framebuffer_mgr =
static_cast<FramebufferManager*>(g_framebuffer_manager.get());
// Invoke init methods on the main wrapper classes.
// These have to be initialized first, because the destructors
// of the remaining classes may call methods on them.
if (!g_object_cache->Initialize() || !framebuffer_mgr->Initialize() ||
!renderer->Initialize(framebuffer_mgr, window_handle, surface))
{
PanicAlert("Failed to initialize Vulkan classes.");
g_renderer.reset();
g_object_cache.reset();
g_command_buffer_mgr.reset();
g_vulkan_context.reset();
UnloadVulkanLibrary();
ShutdownShared();
return false;
}
// Create remaining wrapper instances.
g_vertex_manager = std::make_unique<VertexManager>();
g_texture_cache = std::make_unique<TextureCache>();
g_perf_query = std::make_unique<PerfQuery>();
VertexManager* vertex_manager = static_cast<VertexManager*>(g_vertex_manager.get());
TextureCache* texture_cache = static_cast<TextureCache*>(g_texture_cache.get());
PerfQuery* perf_query = static_cast<PerfQuery*>(g_perf_query.get());
if (!vertex_manager->Initialize(renderer->GetStateTracker()) ||
!texture_cache->Initialize(renderer->GetStateTracker()) ||
!perf_query->Initialize(renderer->GetStateTracker()))
{
PanicAlert("Failed to initialize Vulkan classes.");
g_perf_query.reset();
g_texture_cache.reset();
g_vertex_manager.reset();
g_renderer.reset();
g_object_cache.reset();
g_command_buffer_mgr.reset();
g_vulkan_context.reset();
UnloadVulkanLibrary();
ShutdownShared();
return false;
}
return true;
}
// This is called after Initialize() from the Core.
// Run from the graphics thread.
void VideoBackend::Video_Prepare()
{
// Display the name so the user knows which device was actually created.
OSD::AddMessage(StringFromFormat("Using physical adapter %s",
g_vulkan_context->GetDeviceProperties().deviceName)
.c_str(),
5000);
}
void VideoBackend::Shutdown()
{
g_command_buffer_mgr->WaitForGPUIdle();
g_object_cache.reset();
g_command_buffer_mgr.reset();
g_vulkan_context.reset();
UnloadVulkanLibrary();
ShutdownShared();
}
void VideoBackend::Video_Cleanup()
{
g_command_buffer_mgr->WaitForGPUIdle();
// Save all cached pipelines out to disk for next time.
g_object_cache->SavePipelineCache();
g_texture_cache.reset();
g_perf_query.reset();
g_vertex_manager.reset();
g_renderer.reset();
g_framebuffer_manager.reset();
CleanupShared();
}
}  // namespace Vulkan

View File

@ -15,6 +15,7 @@
#include "VideoBackends/Null/VideoBackend.h"
#include "VideoBackends/OGL/VideoBackend.h"
#include "VideoBackends/Software/VideoBackend.h"
#include "VideoBackends/Vulkan/VideoBackend.h"
#include "VideoCommon/VideoBackendBase.h"
@ -35,7 +36,7 @@ __declspec(dllexport) DWORD NvOptimusEnablement = 1;
void VideoBackendBase::PopulateList()
{
// OGL > D3D11 > D3D12 > Vulkan > SW > Null
g_available_video_backends.push_back(std::make_unique<OGL::VideoBackend>());
#ifdef _WIN32
g_available_video_backends.push_back(std::make_unique<DX11::VideoBackend>());
@ -48,6 +49,7 @@ void VideoBackendBase::PopulateList()
g_available_video_backends.push_back(std::make_unique<DX12::VideoBackend>());
}
#endif
g_available_video_backends.push_back(std::make_unique<Vulkan::VideoBackend>());
g_available_video_backends.push_back(std::make_unique<SW::VideoSoftware>());
g_available_video_backends.push_back(std::make_unique<Null::VideoBackend>());

View File

@ -88,6 +88,7 @@ void VideoConfig::Load(const std::string& ini_file)
settings->Get("BorderlessFullscreen", &bBorderlessFullscreen, false);
settings->Get("EnableValidationLayer", &bEnableValidationLayer, false);
settings->Get("BackendMultithreading", &bBackendMultithreading, true);
settings->Get("CommandBufferExecuteInterval", &iCommandBufferExecuteInterval, 100);
settings->Get("SWZComploc", &bZComploc, true);
settings->Get("SWZFreeze", &bZFreeze, true);
@ -195,6 +196,7 @@ void VideoConfig::GameIniLoad()
CHECK_SETTING("Video_Settings", "DisableFog", bDisableFog);
CHECK_SETTING("Video_Settings", "BackendMultithreading", bBackendMultithreading);
CHECK_SETTING("Video_Settings", "CommandBufferExecuteInterval", iCommandBufferExecuteInterval);
CHECK_SETTING("Video_Enhancements", "ForceFiltering", bForceFiltering);
CHECK_SETTING("Video_Enhancements", "MaxAnisotropy",
@ -300,6 +302,7 @@ void VideoConfig::Save(const std::string& ini_file)
settings->Set("BorderlessFullscreen", bBorderlessFullscreen);
settings->Set("EnableValidationLayer", bEnableValidationLayer);
settings->Set("BackendMultithreading", bBackendMultithreading);
settings->Set("CommandBufferExecuteInterval", iCommandBufferExecuteInterval);
settings->Set("SWZComploc", bZComploc);
settings->Set("SWZFreeze", bZFreeze);

View File

@ -149,6 +149,10 @@ struct VideoConfig final
// Multithreaded submission, currently only supported with Vulkan.
bool bBackendMultithreading;
// Early command buffer execution interval in number of draws.
// Currently only supported with Vulkan.
int iCommandBufferExecuteInterval;
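// For illustration, these two settings as they might appear in a GFX ini file,
// using the defaults from VideoConfig::Load() above:
//   BackendMultithreading = True
//   CommandBufferExecuteInterval = 100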
// Static config per API
// TODO: Move this out of VideoConfig
struct

View File

@ -81,6 +81,9 @@
<ProjectReference Include="$(CoreDir)VideoBackends\Null\Null.vcxproj">
<Project>{53A5391B-737E-49A8-BC8F-312ADA00736F}</Project>
</ProjectReference>
<ProjectReference Include="$(CoreDir)VideoBackends\Vulkan\Vulkan.vcxproj">
<Project>{29F29A19-F141-45AD-9679-5A2923B49DA3}</Project>
</ProjectReference>
<ProjectReference Include="$(CoreDir)VideoBackends\D3D12\D3D12.vcxproj">
<Project>{570215b7-e32f-4438-95ae-c8d955f9fca3}</Project>
</ProjectReference>

View File

@ -62,6 +62,8 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Software", "Core\VideoBacke
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Null", "Core\VideoBackends\Null\Null.vcxproj", "{53A5391B-737E-49A8-BC8F-312ADA00736F}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Vulkan", "Core\VideoBackends\Vulkan\Vulkan.vcxproj", "{29F29A19-F141-45AD-9679-5A2923B49DA3}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Video Backends", "Video Backends", "{AAD1BCD6-9804-44A5-A5FC-4782EA00E9D4}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "pch", "PCH\pch.vcxproj", "{76563A7F-1011-4EAD-B667-7BB18D09568E}"
@ -196,6 +198,10 @@ Global
{53A5391B-737E-49A8-BC8F-312ADA00736F}.Debug|x64.Build.0 = Debug|x64
{53A5391B-737E-49A8-BC8F-312ADA00736F}.Release|x64.ActiveCfg = Release|x64
{53A5391B-737E-49A8-BC8F-312ADA00736F}.Release|x64.Build.0 = Release|x64
{29F29A19-F141-45AD-9679-5A2923B49DA3}.Debug|x64.ActiveCfg = Debug|x64
{29F29A19-F141-45AD-9679-5A2923B49DA3}.Debug|x64.Build.0 = Debug|x64
{29F29A19-F141-45AD-9679-5A2923B49DA3}.Release|x64.ActiveCfg = Release|x64
{29F29A19-F141-45AD-9679-5A2923B49DA3}.Release|x64.Build.0 = Release|x64
{76563A7F-1011-4EAD-B667-7BB18D09568E}.Debug|x64.ActiveCfg = Debug|x64
{76563A7F-1011-4EAD-B667-7BB18D09568E}.Debug|x64.Build.0 = Debug|x64
{76563A7F-1011-4EAD-B667-7BB18D09568E}.Release|x64.ActiveCfg = Release|x64
@ -253,6 +259,7 @@ Global
{EC1A314C-5588-4506-9C1E-2E58E5817F75} = {AAD1BCD6-9804-44A5-A5FC-4782EA00E9D4}
{A4C423AA-F57C-46C7-A172-D1A777017D29} = {AAD1BCD6-9804-44A5-A5FC-4782EA00E9D4}
{53A5391B-737E-49A8-BC8F-312ADA00736F} = {AAD1BCD6-9804-44A5-A5FC-4782EA00E9D4}
{29F29A19-F141-45AD-9679-5A2923B49DA3} = {AAD1BCD6-9804-44A5-A5FC-4782EA00E9D4}
{AAD1BCD6-9804-44A5-A5FC-4782EA00E9D4} = {15670B2E-CED6-4ED5-94CE-A00B1B2B5BA6}
{76563A7F-1011-4EAD-B667-7BB18D09568E} = {15670B2E-CED6-4ED5-94CE-A00B1B2B5BA6}
{CBC76802-C128-4B17-BF6C-23B08C313E5E} = {87ADDFF9-5768-4DA2-A33B-2477593D6677}