Allocate descriptors in blocks of 8 instead of individually, to reduce overhead.

Commit: 8ebc08185b
Parent: 6319dc2a90
Author: Henrik Rydgård
Date:   2023-10-10 10:00:02 +02:00

2 changed files with 17 additions and 2 deletions
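The win here is at the Vulkan API level: vkAllocateDescriptorSets takes a count and an array of layouts, so eight sets can be produced with one driver call instead of eight. Below is a minimal sketch of such a batched call; AllocateBlockOf8 and its parameters are illustrative stand-ins, not code from this commit.

#include <vulkan/vulkan.h>

// Illustrative helper (hypothetical name): allocate 8 descriptor sets that
// all share one layout, in a single vkAllocateDescriptorSets call.
static VkResult AllocateBlockOf8(VkDevice device, VkDescriptorPool pool,
                                 VkDescriptorSetLayout layout,
                                 VkDescriptorSet outSets[8]) {
	// The API wants one pSetLayouts entry per set, even when identical.
	VkDescriptorSetLayout layouts[8];
	for (int i = 0; i < 8; i++)
		layouts[i] = layout;
	VkDescriptorSetAllocateInfo info{ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO };
	info.descriptorPool = pool;
	info.descriptorSetCount = 8;
	info.pSetLayouts = layouts;
	// One call, eight sets: the per-call overhead is amortized 8x.
	return vkAllocateDescriptorSets(device, &info, outSets);
}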


@@ -1339,6 +1339,7 @@ void VulkanQueueRunner::PerformRenderPass(const VKRStep &step, VkCommandBuffer c
 		case VKRRenderCommand::DRAW_INDEXED:
 			if (pipelineOK) {
 				VkDescriptorSet set = (*descSets)[c.drawIndexed.descSetIndex].set;
+				_dbg_assert_(set != VK_NULL_HANDLE);
 				vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0, 1, &set, c.drawIndexed.numUboOffsets, c.drawIndexed.uboOffsets);
 				vkCmdBindIndexBuffer(cmd, c.drawIndexed.ibuffer, c.drawIndexed.ioffset, VK_INDEX_TYPE_UINT16);
 				VkDeviceSize voffset = c.drawIndexed.voffset;

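The added assert presumably guards the new scheme below: sets now come out of a pre-allocated cache, so a VK_NULL_HANDLE at bind time would mean a batch allocation failed or an entry was handed out without being filled in.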

@@ -1698,6 +1698,13 @@ void VKRPipelineLayout::FlushDescSets(VulkanContext *vulkan, int frame, QueuePro
 	pool.Reset();
+	VkDescriptorSet setCache[8];
+	VkDescriptorSetLayout layoutsForAlloc[ARRAY_SIZE(setCache)];
+	for (int i = 0; i < ARRAY_SIZE(setCache); i++) {
+		layoutsForAlloc[i] = descriptorSetLayout;
+	}
+	int setsUsed = ARRAY_SIZE(setCache);  // To allocate immediately.
 	// This will write all descriptors.
 	// Initially, we just do a simple look-back comparing to the previous descriptor to avoid sequential dupes.
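Two details in this hunk: Vulkan requires pSetLayouts to contain one entry per set being allocated even when all sets share the same layout, which is why layoutsForAlloc simply repeats descriptorSetLayout; and setsUsed starts at ARRAY_SIZE(setCache) so the cache reads as exhausted and the very first descriptor forces an allocation, as the inline comment notes.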
@@ -1724,8 +1731,15 @@ void VKRPipelineLayout::FlushDescSets(VulkanContext *vulkan, int frame, QueuePro
 			}
 		}
-		// TODO: Allocate in batches.
-		pool.Allocate(&d.set, 1, &descriptorSetLayout);
+		if (setsUsed < ARRAY_SIZE(setCache)) {
+			d.set = setCache[setsUsed++];
+		} else {
+			// Allocate in small batches.
+			bool success = pool.Allocate(setCache, ARRAY_SIZE(setCache), layoutsForAlloc);
+			_dbg_assert_(success);
+			d.set = setCache[0];
+			setsUsed = 1;
+		}
 		// TODO: Build up bigger batches of writes.
 		const PackedDescriptor *data = descData.begin() + d.offset;
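Putting the two hunks together, the pattern is a small bump allocator over a fixed cache: consume cached sets one by one, and refill with a single batched call when the cache runs dry. A self-contained sketch of that pattern follows; the names (DescSetBlockAllocator, Next) are hypothetical, not PPSSPP's.

#include <vulkan/vulkan.h>

// Illustrative sketch of the cache-and-refill pattern; names are hypothetical.
struct DescSetBlockAllocator {
	static const int kBlockSize = 8;

	VkDevice device = VK_NULL_HANDLE;
	VkDescriptorPool pool = VK_NULL_HANDLE;
	VkDescriptorSetLayout layout = VK_NULL_HANDLE;

	VkDescriptorSet cache[kBlockSize]{};
	int used = kBlockSize;  // Start "exhausted" so the first Next() allocates.

	VkDescriptorSet Next() {
		if (used == kBlockSize) {
			// Refill: one driver call yields a whole block of sets.
			VkDescriptorSetLayout layouts[kBlockSize];
			for (int i = 0; i < kBlockSize; i++)
				layouts[i] = layout;
			VkDescriptorSetAllocateInfo info{ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO };
			info.descriptorPool = pool;
			info.descriptorSetCount = kBlockSize;
			info.pSetLayouts = layouts;
			if (vkAllocateDescriptorSets(device, &info, cache) != VK_SUCCESS)
				return VK_NULL_HANDLE;  // Pool exhausted; the caller's assert would fire.
			used = 0;
		}
		return cache[used++];
	}
};

Since pool.Reset() above returns everything to the pool each frame, any sets still sitting unused in the cache when the frame ends are discarded along with it; presumably a few wasted sets were judged a fair trade for roughly one allocation call per eight sets.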