vulkan: Add a common CmdBegin/EndRenderPass implementation

This implements vkCmdBeginRenderPass, vkCmdEndRenderPass, and
vkCmdNextSubpass in terms of the new vkCmdBegin/EndRendering included in
VK_KHR_dynamic_rendering and Vulkan 1.3.  All subpass dependencies and
implicit layout transitions are turned into actual barriers.  It does
require VK_KHR_synchronization2 because it always uses the 64-bit
versions of the pipeline stage and access bitfields.

Reviewed-by: Ivan Briano <ivan.briano@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14961>
Author: Jason Ekstrand
Date:   2022-02-09 16:30:38 -06:00
Parent: 874aeb8743
Commit: 1d726940d2
4 changed files with 977 additions and 2 deletions
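For orientation, the equivalence this commit implements can be sketched at the plain Vulkan API level: one subpass becomes a vkCmdPipelineBarrier2 for its dependencies and implicit layout transitions, followed by a vkCmdBeginRendering/vkCmdEndRendering pair carrying the load/store ops. The snippet below is a simplified, hypothetical illustration of that mapping for a single color attachment, not code from this commit; the image, view, layouts, and masks are placeholders.

#include <vulkan/vulkan.h>

/* Hypothetical sketch: a one-subpass render pass with a color clear and an
 * implicit UNDEFINED -> COLOR_ATTACHMENT_OPTIMAL transition, expressed with
 * dynamic rendering and synchronization2.
 */
static void
begin_render_pass_as_dynamic_rendering(VkCommandBuffer cmd,
                                       VkImage color_image,    /* placeholder */
                                       VkImageView color_view, /* placeholder */
                                       VkRect2D render_area,
                                       VkClearValue clear)
{
   /* The subpass dependency and the implicit layout transition become a
    * real barrier with explicit stage/access masks.
    */
   const VkImageMemoryBarrier2 barrier = {
      .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
      .srcStageMask = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT,
      .srcAccessMask = 0,
      .dstStageMask = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT,
      .dstAccessMask = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT,
      .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
      .newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
      .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
      .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
      .image = color_image,
      .subresourceRange = {
         .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
         .levelCount = 1,
         .layerCount = 1,
      },
   };
   const VkDependencyInfo dep_info = {
      .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
      .imageMemoryBarrierCount = 1,
      .pImageMemoryBarriers = &barrier,
   };
   vkCmdPipelineBarrier2(cmd, &dep_info);

   /* The subpass itself becomes a dynamic rendering instance carrying the
    * render pass load/store ops and clear value.
    */
   const VkRenderingAttachmentInfo color_att = {
      .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
      .imageView = color_view,
      .imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
      .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
      .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
      .clearValue = clear,
   };
   const VkRenderingInfo rendering = {
      .sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
      .renderArea = render_area,
      .layerCount = 1,
      .colorAttachmentCount = 1,
      .pColorAttachments = &color_att,
   };
   vkCmdBeginRendering(cmd, &rendering);
   /* ... the subpass's draws go here ... */
   vkCmdEndRendering(cmd);
}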


@@ -48,6 +48,7 @@ vk_command_buffer_init(struct vk_command_buffer *command_buffer,
void
vk_command_buffer_reset(struct vk_command_buffer *command_buffer)
{
vk_command_buffer_reset_render_pass(command_buffer);
vk_cmd_queue_reset(&command_buffer->cmd_queue);
util_dynarray_clear(&command_buffer->labels);
command_buffer->region_begin = true;
@@ -57,6 +58,7 @@ void
vk_command_buffer_finish(struct vk_command_buffer *command_buffer)
{
list_del(&command_buffer->pool_link);
vk_command_buffer_reset_render_pass(command_buffer);
vk_cmd_queue_finish(&command_buffer->cmd_queue);
util_dynarray_fini(&command_buffer->labels);
vk_object_base_finish(&command_buffer->base);


@@ -34,6 +34,32 @@ extern "C" {
#endif
struct vk_command_pool;
struct vk_framebuffer;
struct vk_image_view;
struct vk_render_pass;
/* Since VkSubpassDescription2::viewMask is a 32-bit integer, there is a
* maximum of 32 possible views.
*/
#define MESA_VK_MAX_MULTIVIEW_VIEW_COUNT 32
struct vk_attachment_view_state {
VkImageLayout layout;
VkImageLayout stencil_layout;
};
struct vk_attachment_state {
struct vk_image_view *image_view;
/** A running tally of which views have been loaded */
uint32_t views_loaded;
/** Per-view state */
struct vk_attachment_view_state views[MESA_VK_MAX_MULTIVIEW_VIEW_COUNT];
/** VkRenderPassBeginInfo::pClearValues[i] */
VkClearValue clear_value;
};
struct vk_command_buffer {
struct vk_object_base base;
@@ -95,6 +121,15 @@ struct vk_command_buffer {
*/
struct util_dynarray labels;
bool region_begin;
struct vk_render_pass *render_pass;
uint32_t subpass_idx;
struct vk_framebuffer *framebuffer;
VkRect2D render_area;
/* This uses the same trick as STACK_ARRAY */
struct vk_attachment_state *attachments;
struct vk_attachment_state _attachments[8];
};
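The attachments/_attachments pair above is the inline-or-heap pattern the STACK_ARRAY comment refers to: use the embedded array when it is large enough and only fall back to the heap for unusually large attachment counts. A minimal sketch of the idea, with hypothetical names (this is not the runtime's actual code):

#include <stdlib.h>

#define INLINE_ATTACHMENT_COUNT 8

struct example_state {
   /* Points either at _attachments or at a heap allocation */
   struct vk_attachment_state *attachments;
   struct vk_attachment_state _attachments[INLINE_ATTACHMENT_COUNT];
};

static void
example_begin(struct example_state *s, uint32_t count)
{
   /* The common case (few attachments) needs no allocation at all. */
   if (count > INLINE_ATTACHMENT_COUNT)
      s->attachments = malloc(count * sizeof(*s->attachments));
   else
      s->attachments = s->_attachments;
}

static void
example_reset(struct example_state *s)
{
   /* Only free what was actually heap-allocated. */
   if (s->attachments != s->_attachments)
      free(s->attachments);
   s->attachments = NULL;
}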
VK_DEFINE_HANDLE_CASTS(vk_command_buffer, base, VkCommandBuffer,
@@ -105,6 +140,9 @@ vk_command_buffer_init(struct vk_command_buffer *command_buffer,
struct vk_command_pool *pool,
VkCommandBufferLevel level);
void
vk_command_buffer_reset_render_pass(struct vk_command_buffer *cmd_buffer);
void
vk_command_buffer_reset(struct vk_command_buffer *command_buffer);


@@ -325,6 +325,7 @@ vk_render_pass_attachment_init(struct vk_render_pass_attachment *att,
.format = desc->format,
.aspects = vk_format_aspects(desc->format),
.samples = desc->samples,
.view_mask = 0,
.load_op = desc->loadOp,
.store_op = desc->storeOp,
.stencil_load_op = desc->stencilLoadOp,
@@ -453,7 +454,23 @@ vk_common_CreateRenderPass2(VkDevice _device,
subpass->attachment_count = num_subpass_attachments2(desc);
subpass->attachments = next_subpass_attachment;
subpass->view_mask = desc->viewMask;
/* From the Vulkan 1.3.204 spec:
*
* VUID-VkRenderPassCreateInfo2-viewMask-03058
*
* "The VkSubpassDescription2::viewMask member of all elements of
* pSubpasses must either all be 0, or all not be 0"
*/
if (desc->viewMask)
pass->is_multiview = true;
assert(pass->is_multiview == (desc->viewMask != 0));
/* For all view masks in the vk_render_pass data structure, we use a
* mask of 1 for non-multiview instead of a mask of 0.
*/
subpass->view_mask = desc->viewMask ? desc->viewMask : 1;
pass->view_mask |= subpass->view_mask;
subpass->input_count = desc->inputAttachmentCount;
if (desc->inputAttachmentCount > 0) {
@@ -546,6 +563,43 @@ vk_common_CreateRenderPass2(VkDevice _device,
assert(next_subpass_attachment ==
subpass_attachments + subpass_attachment_count);
/* Walk backwards over the subpasses to compute view masks and
* last_subpass masks for all attachments.
*/
for (uint32_t s = 0; s < pCreateInfo->subpassCount; s++) {
struct vk_subpass *subpass =
&pass->subpasses[(pCreateInfo->subpassCount - 1) - s];
/* First, compute last_subpass for all the attachments */
for (uint32_t a = 0; a < subpass->attachment_count; a++) {
struct vk_subpass_attachment *att = &subpass->attachments[a];
if (att->attachment == VK_ATTACHMENT_UNUSED)
continue;
assert(att->attachment < pass->attachment_count);
const struct vk_render_pass_attachment *pass_att =
&pass->attachments[att->attachment];
att->last_subpass = subpass->view_mask & ~pass_att->view_mask;
}
/* Then compute pass_att->view_mask. We do the two separately so that
* we end up with the right last_subpass even if the same attachment is
* used twice within a subpass.
*/
for (uint32_t a = 0; a < subpass->attachment_count; a++) {
const struct vk_subpass_attachment *att = &subpass->attachments[a];
if (att->attachment == VK_ATTACHMENT_UNUSED)
continue;
assert(att->attachment < pass->attachment_count);
struct vk_render_pass_attachment *pass_att =
&pass->attachments[att->attachment];
pass_att->view_mask |= subpass->view_mask;
}
}
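/* A hypothetical worked example of the backwards walk above, with
 * illustrative numbers: take two subpasses that both have view_mask = 0x3
 * and both reference attachment 0.
 *
 *    start:               pass_att->view_mask = 0
 *    s = 0 (subpass 1):   att->last_subpass  = 0x3 & ~0x0 = 0x3
 *                         pass_att->view_mask |= 0x3      -> 0x3
 *    s = 1 (subpass 0):   att->last_subpass  = 0x3 & ~0x3 = 0x0
 *
 * Only the reference in the final subpass ends up with last_subpass bits
 * set; begin_subpass() later uses this to choose between the attachment's
 * real store op and a conservative STORE_OP_STORE.
 */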
pass->dependency_count = pCreateInfo->dependencyCount;
for (uint32_t d = 0; d < pCreateInfo->dependencyCount; d++) {
const VkSubpassDependency2 *dep = &pCreateInfo->pDependencies[d];
@@ -596,3 +650,856 @@ vk_common_DestroyRenderPass(VkDevice _device,
vk_object_free(device, pAllocator, pass);
}
VKAPI_ATTR void VKAPI_CALL
vk_common_GetRenderAreaGranularity(VkDevice device,
VkRenderPass renderPass,
VkExtent2D *pGranularity)
{
*pGranularity = (VkExtent2D) { 1, 1 };
}
static bool
vk_image_layout_supports_input_attachment(VkImageLayout layout)
{
switch (layout) {
case VK_IMAGE_LAYOUT_GENERAL:
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
return true;
default:
return false;
}
}
struct stage_access {
VkPipelineStageFlagBits2 stages;
VkAccessFlagBits2 access;
};
static bool
vk_image_layout_are_all_aspects_read_only(VkImageLayout layout,
VkImageAspectFlags aspects)
{
u_foreach_bit(a, aspects) {
VkImageAspectFlagBits aspect = 1u << a;
if (!vk_image_layout_is_read_only(layout, aspect))
return false;
}
return true;
}
static struct stage_access
stage_access_for_layout(VkImageLayout layout, VkImageAspectFlags aspects)
{
VkPipelineStageFlagBits2 stages = 0;
VkAccessFlagBits2 access = 0;
if (vk_image_layout_supports_input_attachment(layout)) {
stages |= VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT;
access |= VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT;
}
if (aspects & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
stages |= VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT;
access |= VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
if (!vk_image_layout_are_all_aspects_read_only(layout, aspects)) {
access |= VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
/* It might be a resolve attachment */
stages |= VK_PIPELINE_STAGE_2_TRANSFER_BIT;
access |= VK_ACCESS_2_TRANSFER_WRITE_BIT;
}
} else {
/* Color */
if (!vk_image_layout_are_all_aspects_read_only(layout, aspects)) {
/* There are no read-only color attachments */
stages |= VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT;
access |= VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT |
VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT;
/* It might be a resolve attachment */
stages |= VK_PIPELINE_STAGE_2_TRANSFER_BIT;
access |= VK_ACCESS_2_TRANSFER_WRITE_BIT;
}
}
return (struct stage_access) {
.stages = stages,
.access = access,
};
}
static void
transition_image_range(const struct vk_image_view *image_view,
VkImageSubresourceRange range,
VkImageLayout old_layout,
VkImageLayout new_layout,
VkImageLayout old_stencil_layout,
VkImageLayout new_stencil_layout,
uint32_t *barrier_count,
uint32_t max_barrier_count,
VkImageMemoryBarrier2 *barriers)
{
VkImageAspectFlags aspects_left = range.aspectMask;
while (aspects_left) {
range.aspectMask = aspects_left;
/* If we have a depth/stencil image and one of the layouts doesn't match
* between depth and stencil, we need two barriers. Restrict to depth
* and we'll pick up stencil on the next iteration.
*/
if (range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT |
VK_IMAGE_ASPECT_STENCIL_BIT) &&
(old_layout != old_stencil_layout ||
new_layout != new_stencil_layout))
range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
if (range.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
/* We're down to a single aspect bit so this is going to be the last
* iteration and it's fine to stomp the input variables here.
*/
old_layout = old_stencil_layout;
new_layout = new_stencil_layout;
}
if (new_layout != old_layout) {
/* We could go about carefully calculating every possible way the
* attachment may have been used in the render pass or we can break
* out the big hammer and throw in any stage and access flags
* possible for the given layouts.
*/
struct stage_access src_sa, dst_sa;
src_sa = stage_access_for_layout(old_layout, range.aspectMask);
dst_sa = stage_access_for_layout(new_layout, range.aspectMask);
assert(*barrier_count < max_barrier_count);
barriers[(*barrier_count)++] = (VkImageMemoryBarrier2) {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.srcStageMask = src_sa.stages,
.srcAccessMask = src_sa.access,
.dstStageMask = dst_sa.stages,
.dstAccessMask = dst_sa.access,
.oldLayout = old_layout,
.newLayout = new_layout,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = vk_image_to_handle(image_view->image),
.subresourceRange = range,
};
}
aspects_left &= ~range.aspectMask;
}
}
static void
transition_attachment(struct vk_command_buffer *cmd_buffer,
uint32_t att_idx,
uint32_t view_mask,
VkImageLayout layout,
VkImageLayout stencil_layout,
uint32_t *barrier_count,
uint32_t max_barrier_count,
VkImageMemoryBarrier2 *barriers)
{
const struct vk_render_pass *pass = cmd_buffer->render_pass;
const struct vk_framebuffer *framebuffer = cmd_buffer->framebuffer;
const struct vk_render_pass_attachment *pass_att =
&pass->attachments[att_idx];
struct vk_attachment_state *att_state = &cmd_buffer->attachments[att_idx];
const struct vk_image_view *image_view = att_state->image_view;
/* 3D is stupidly special. From the Vulkan 1.3.204 spec:
*
* "When the VkImageSubresourceRange structure is used to select a
* subset of the slices of a 3D image's mip level in order to create
* a 2D or 2D array image view of a 3D image created with
* VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, baseArrayLayer and
* layerCount specify the first slice index and the number of slices
* to include in the created image view. Such an image view can be
* used as a framebuffer attachment that refers only to the specified
* range of slices of the selected mip level. However, any layout
* transitions performed on such an attachment view during a render
* pass instance still apply to the entire subresource referenced
* which includes all the slices of the selected mip level."
*
* To deal with this, we expand out the layer range to include the
* entire 3D image and treat them as having only a single view even when
* multiview is enabled. This latter part means that we effectively only
* track one image layout for the entire attachment rather than one per
* view like we do for all the others.
*/
if (image_view->image->image_type == VK_IMAGE_TYPE_3D)
view_mask = 1;
u_foreach_bit(view, view_mask) {
assert(view >= 0 && view < MESA_VK_MAX_MULTIVIEW_VIEW_COUNT);
struct vk_attachment_view_state *att_view_state = &att_state->views[view];
/* First, check to see if we even need a transition */
if (att_view_state->layout == layout &&
att_view_state->stencil_layout == stencil_layout)
continue;
VkImageSubresourceRange range = {
.aspectMask = pass_att->aspects,
.baseMipLevel = image_view->base_mip_level,
.levelCount = 1,
};
/* From the Vulkan 1.3.207 spec:
*
* "Automatic layout transitions apply to the entire image
* subresource attached to the framebuffer. If multiview is not
* enabled and the attachment is a view of a 1D or 2D image, the
* automatic layout transitions apply to the number of layers
* specified by VkFramebufferCreateInfo::layers. If multiview is
* enabled and the attachment is a view of a 1D or 2D image, the
* automatic layout transitions apply to the layers corresponding to
* views which are used by some subpass in the render pass, even if
* that subpass does not reference the given attachment. If the
* attachment view is a 2D or 2D array view of a 3D image, even if
* the attachment view only refers to a subset of the slices of the
* selected mip level of the 3D image, automatic layout transitions
* apply to the entire subresource referenced which is the entire mip
* level in this case."
*/
if (image_view->image->image_type == VK_IMAGE_TYPE_3D) {
assert(view == 0);
range.baseArrayLayer = 0;
range.layerCount = image_view->extent.depth;
} else if (pass->is_multiview) {
range.baseArrayLayer = image_view->base_array_layer + view;
range.layerCount = 1;
} else {
assert(view == 0);
range.baseArrayLayer = image_view->base_array_layer;
range.layerCount = framebuffer->layers;
}
transition_image_range(image_view, range,
att_view_state->layout, layout,
att_view_state->stencil_layout, stencil_layout,
barrier_count, max_barrier_count, barriers);
att_view_state->layout = layout;
att_view_state->stencil_layout = stencil_layout;
}
}
static void
load_attachment(struct vk_command_buffer *cmd_buffer,
uint32_t att_idx, uint32_t view_mask,
VkImageLayout layout, VkImageLayout stencil_layout)
{
const struct vk_render_pass *pass = cmd_buffer->render_pass;
const struct vk_framebuffer *framebuffer = cmd_buffer->framebuffer;
const struct vk_render_pass_attachment *rp_att = &pass->attachments[att_idx];
struct vk_attachment_state *att_state = &cmd_buffer->attachments[att_idx];
struct vk_device_dispatch_table *disp =
&cmd_buffer->base.device->dispatch_table;
/* Don't load any views we've already loaded */
view_mask &= ~att_state->views_loaded;
if (view_mask == 0)
return;
/* From here on, if we return, we loaded the views */
att_state->views_loaded |= view_mask;
/* We only need to load/store if there's a clear */
bool need_load_store = false;
if ((rp_att->aspects & ~VK_IMAGE_ASPECT_STENCIL_BIT) &&
rp_att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
need_load_store = true;
if ((rp_att->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
rp_att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
need_load_store = true;
if (!need_load_store)
return;
const VkRenderingAttachmentInfo att = {
.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
.imageView = vk_image_view_to_handle(att_state->image_view),
.imageLayout = layout,
.loadOp = rp_att->load_op,
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
.clearValue = att_state->clear_value,
};
const VkRenderingAttachmentInfo stencil_att = {
.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
.imageView = vk_image_view_to_handle(att_state->image_view),
.imageLayout = stencil_layout,
.loadOp = rp_att->stencil_load_op,
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
.clearValue = att_state->clear_value,
};
VkRenderingInfo render = {
.sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
.renderArea = cmd_buffer->render_area,
.layerCount = pass->is_multiview ? 1 : framebuffer->layers,
.viewMask = pass->is_multiview ? view_mask : 0,
};
if (rp_att->aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
VK_IMAGE_ASPECT_STENCIL_BIT)) {
if (rp_att->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
render.pDepthAttachment = &att;
if (rp_att->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
render.pStencilAttachment = &stencil_att;
} else {
render.colorAttachmentCount = 1;
render.pColorAttachments = &att;
}
disp->CmdBeginRendering(vk_command_buffer_to_handle(cmd_buffer), &render);
disp->CmdEndRendering(vk_command_buffer_to_handle(cmd_buffer));
}
static void
begin_subpass(struct vk_command_buffer *cmd_buffer,
const VkSubpassBeginInfo *begin_info)
{
const struct vk_render_pass *pass = cmd_buffer->render_pass;
const struct vk_framebuffer *framebuffer = cmd_buffer->framebuffer;
const uint32_t subpass_idx = cmd_buffer->subpass_idx;
assert(subpass_idx < pass->subpass_count);
const struct vk_subpass *subpass = &pass->subpasses[subpass_idx];
struct vk_device_dispatch_table *disp =
&cmd_buffer->base.device->dispatch_table;
/* First, we figure out all our attachments and attempt to handle image
* layout transitions and load ops as part of vkCmdBeginRendering if we
* can. For any we can't handle this way, we'll need explicit barriers
* or quick vkCmdBegin/EndRendering to do the load op.
*/
STACK_ARRAY(VkRenderingAttachmentInfo, color_attachments,
subpass->color_count);
for (uint32_t i = 0; i < subpass->color_count; i++) {
const struct vk_subpass_attachment *sp_att =
&subpass->color_attachments[i];
VkRenderingAttachmentInfo *color_attachment = &color_attachments[i];
if (sp_att->attachment == VK_ATTACHMENT_UNUSED) {
*color_attachment = (VkRenderingAttachmentInfo) {
.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
.imageView = VK_NULL_HANDLE,
};
continue;
}
assert(sp_att->attachment < pass->attachment_count);
const struct vk_render_pass_attachment *rp_att =
&pass->attachments[sp_att->attachment];
struct vk_attachment_state *att_state =
&cmd_buffer->attachments[sp_att->attachment];
*color_attachment = (VkRenderingAttachmentInfo) {
.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
.imageView = vk_image_view_to_handle(att_state->image_view),
.imageLayout = sp_att->layout,
};
if (!(subpass->view_mask & att_state->views_loaded)) {
/* None of these views have been used before */
color_attachment->loadOp = rp_att->load_op;
color_attachment->clearValue = att_state->clear_value;
att_state->views_loaded |= subpass->view_mask;
} else {
/* We've seen at least one of the views of this attachment before so
* we need to LOAD_OP_LOAD.
*/
color_attachment->loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
}
if (!(subpass->view_mask & ~sp_att->last_subpass)) {
/* This is the last subpass for every view */
color_attachment->storeOp = rp_att->store_op;
} else {
/* For at least one of our views, this isn't the last subpass
*
* In the edge case where we have lots of weird overlap between view
* masks of different subpasses, this may mean that we get STORE_OP_STORE in
* some places where it may have wanted STORE_OP_NONE but that should
* be harmless.
*/
color_attachment->storeOp = VK_ATTACHMENT_STORE_OP_STORE;
}
if (sp_att->resolve != NULL) {
assert(sp_att->resolve->attachment < pass->attachment_count);
struct vk_attachment_state *res_att_state =
&cmd_buffer->attachments[sp_att->resolve->attachment];
/* Resolve attachments are entirely overwritten by the resolve
* operation so the load op really doesn't matter. We can consider
* the resolve as being the load.
*/
res_att_state->views_loaded |= subpass->view_mask;
if (vk_format_is_int(res_att_state->image_view->format))
color_attachment->resolveMode = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT;
else
color_attachment->resolveMode = VK_RESOLVE_MODE_AVERAGE_BIT;
color_attachment->resolveImageView =
vk_image_view_to_handle(res_att_state->image_view);
color_attachment->resolveImageLayout = sp_att->resolve->layout;
}
}
VkRenderingAttachmentInfo depth_attachment = {
.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
};
VkRenderingAttachmentInfo stencil_attachment = {
.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
};
if (subpass->depth_stencil_attachment != NULL) {
const struct vk_subpass_attachment *sp_att =
subpass->depth_stencil_attachment;
assert(sp_att->attachment < pass->attachment_count);
const struct vk_render_pass_attachment *rp_att =
&pass->attachments[sp_att->attachment];
struct vk_attachment_state *att_state =
&cmd_buffer->attachments[sp_att->attachment];
assert(sp_att->aspects == rp_att->aspects);
if (rp_att->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
depth_attachment.imageView =
vk_image_view_to_handle(att_state->image_view);
depth_attachment.imageLayout = sp_att->layout;
}
if (rp_att->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
stencil_attachment.imageView =
vk_image_view_to_handle(att_state->image_view);
stencil_attachment.imageLayout = sp_att->stencil_layout;
}
if (!(subpass->view_mask & att_state->views_loaded)) {
/* None of these views have been used before */
depth_attachment.loadOp = rp_att->load_op;
depth_attachment.clearValue = att_state->clear_value;
stencil_attachment.loadOp = rp_att->stencil_load_op;
stencil_attachment.clearValue = att_state->clear_value;
att_state->views_loaded |= subpass->view_mask;
} else {
/* We've seen at least one of the views of this attachment before so
* we need to LOAD_OP_LOAD.
*/
depth_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
stencil_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
}
if (!(subpass->view_mask & ~sp_att->last_subpass)) {
/* This is the last subpass for every view */
depth_attachment.storeOp = rp_att->store_op;
stencil_attachment.storeOp = rp_att->stencil_store_op;
} else {
/* For at least one of our views, this isn't the last subpass
*
* In the edge case where we have lots of weird overlap between view
* masks of different subpasses, this may mean that we get STORE_OP_STORE in
* some places where it may have wanted STORE_OP_NONE but that should
* be harmless.
*/
depth_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
stencil_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
}
if (sp_att->resolve != NULL) {
const struct vk_subpass_attachment *res_sp_att = sp_att->resolve;
assert(res_sp_att->attachment < pass->attachment_count);
const struct vk_render_pass_attachment *res_rp_att =
&pass->attachments[res_sp_att->attachment];
struct vk_attachment_state *res_att_state =
&cmd_buffer->attachments[res_sp_att->attachment];
/* From the Vulkan 1.3.204 spec:
*
* "VkSubpassDescriptionDepthStencilResolve::depthResolveMode is
* ignored if the VkFormat of the pDepthStencilResolveAttachment
* does not have a depth component. Similarly,
* VkSubpassDescriptionDepthStencilResolve::stencilResolveMode is
* ignored if the VkFormat of the pDepthStencilResolveAttachment
* does not have a stencil component."
*
* TODO: Should we handle this here or when we create the render
* pass? Handling it here makes load ops "correct" in the sense
* that, if we resolve to the wrong aspect, we will still consider
* it bound and clear it if requested.
*/
VkResolveModeFlagBitsKHR depth_resolve_mode = VK_RESOLVE_MODE_NONE;
if (res_rp_att->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
depth_resolve_mode = subpass->depth_resolve_mode;
VkResolveModeFlagBitsKHR stencil_resolve_mode = VK_RESOLVE_MODE_NONE;
if (res_rp_att->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
stencil_resolve_mode = subpass->stencil_resolve_mode;
VkImageAspectFlags resolved_aspects = 0;
if (depth_resolve_mode != VK_RESOLVE_MODE_NONE) {
depth_attachment.resolveMode = depth_resolve_mode;
depth_attachment.resolveImageView =
vk_image_view_to_handle(res_att_state->image_view);
depth_attachment.resolveImageLayout =
sp_att->resolve->layout;
resolved_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
}
if (stencil_resolve_mode != VK_RESOLVE_MODE_NONE) {
stencil_attachment.resolveMode = stencil_resolve_mode;
stencil_attachment.resolveImageView =
vk_image_view_to_handle(res_att_state->image_view);
stencil_attachment.resolveImageLayout =
sp_att->resolve->stencil_layout;
resolved_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
}
if (resolved_aspects == rp_att->aspects) {
/* The resolve attachment is entirely overwritten by the
* resolve operation so the load op really doesn't matter.
* We can consider the resolve as being the load.
*/
res_att_state->views_loaded |= subpass->view_mask;
}
}
}
/* Next, handle any barriers we need. This may include a general
* VkMemoryBarrier for subpass dependencies and it may include some
* number of VkImageMemoryBarriers for layout transitions.
*/
bool needs_mem_barrier = false;
VkMemoryBarrier2 mem_barrier = {
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
};
for (uint32_t d = 0; d < pass->dependency_count; d++) {
const struct vk_subpass_dependency *dep = &pass->dependencies[d];
if (dep->dst_subpass != subpass_idx)
continue;
if (dep->flags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
/* From the Vulkan 1.3.204 spec:
*
* VUID-VkSubpassDependency2-dependencyFlags-03091
*
* "If dependencyFlags includes VK_DEPENDENCY_VIEW_LOCAL_BIT,
* dstSubpass must not be equal to VK_SUBPASS_EXTERNAL"
*/
assert(dep->src_subpass != VK_SUBPASS_EXTERNAL);
assert(dep->src_subpass < pass->subpass_count);
const struct vk_subpass *src_subpass =
&pass->subpasses[dep->src_subpass];
/* Figure out the set of views in the source subpass affected by this
* dependency.
*/
uint32_t src_dep_view_mask = subpass->view_mask;
if (dep->view_offset >= 0)
src_dep_view_mask <<= dep->view_offset;
else
src_dep_view_mask >>= -dep->view_offset;
/* From the Vulkan 1.3.204 spec:
*
* "If the dependency is view-local, then each view (dstView) in
* the destination subpass depends on the view dstView +
* pViewOffsets[dependency] in the source subpass. If there is not
* such a view in the source subpass, then this dependency does
* not affect that view in the destination subpass."
*/
if (!(src_subpass->view_mask & src_dep_view_mask))
continue;
}
needs_mem_barrier = true;
mem_barrier.srcStageMask |= dep->src_stage_mask;
mem_barrier.srcAccessMask |= dep->src_access_mask;
mem_barrier.dstStageMask |= dep->dst_stage_mask;
mem_barrier.dstAccessMask |= dep->dst_access_mask;
}
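/* A hypothetical example of the view-local offset handling above, with
 * illustrative masks:
 *
 *    dst subpass view_mask = 0b0110  (views 1 and 2)
 *    dep->view_offset      = -1
 *    src_dep_view_mask     = 0b0110 >> 1 = 0b0011  (views 0 and 1)
 *
 * i.e. dst view 1 depends on src view 0 and dst view 2 depends on src
 * view 1.  If the source subpass uses neither view 0 nor view 1, the
 * dependency affects no views of this subpass and is skipped.
 */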
uint32_t max_image_barrier_count = 0;
for (uint32_t a = 0; a < subpass->attachment_count; a++) {
const struct vk_subpass_attachment *sp_att = &subpass->attachments[a];
if (sp_att->attachment == VK_ATTACHMENT_UNUSED)
continue;
assert(sp_att->attachment < pass->attachment_count);
const struct vk_render_pass_attachment *rp_att =
&pass->attachments[sp_att->attachment];
max_image_barrier_count += util_bitcount(subpass->view_mask) *
util_bitcount(rp_att->aspects);
}
STACK_ARRAY(VkImageMemoryBarrier2, image_barriers, max_image_barrier_count);
uint32_t image_barrier_count = 0;
for (uint32_t a = 0; a < subpass->attachment_count; a++) {
const struct vk_subpass_attachment *sp_att = &subpass->attachments[a];
if (sp_att->attachment == VK_ATTACHMENT_UNUSED)
continue;
/* If we're using an initial layout, the attachment will already be
* marked as transitioned and this will be a no-op.
*/
transition_attachment(cmd_buffer, sp_att->attachment,
subpass->view_mask,
sp_att->layout, sp_att->stencil_layout,
&image_barrier_count,
max_image_barrier_count,
image_barriers);
}
assert(image_barrier_count <= max_image_barrier_count);
if (needs_mem_barrier || image_barrier_count > 0) {
const VkDependencyInfo dependency_info = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.dependencyFlags = 0,
.memoryBarrierCount = needs_mem_barrier ? 1 : 0,
.pMemoryBarriers = needs_mem_barrier ? &mem_barrier : NULL,
.imageMemoryBarrierCount = image_barrier_count,
.pImageMemoryBarriers = image_barrier_count > 0 ?
image_barriers : NULL,
};
disp->CmdPipelineBarrier2(vk_command_buffer_to_handle(cmd_buffer),
&dependency_info);
}
STACK_ARRAY_FINISH(image_barriers);
/* Next, handle any VK_ATTACHMENT_LOAD_OP_CLEAR that we couldn't handle
* directly by emitting a quick vkCmdBegin/EndRendering to do the load.
*/
for (uint32_t a = 0; a < subpass->attachment_count; a++) {
const struct vk_subpass_attachment *sp_att = &subpass->attachments[a];
if (sp_att->attachment == VK_ATTACHMENT_UNUSED)
continue;
load_attachment(cmd_buffer, sp_att->attachment, subpass->view_mask,
sp_att->layout, sp_att->stencil_layout);
}
/* TODO: Handle preserve attachments
*
* For immediate renderers, this isn't a big deal as LOAD_OP_LOAD and
* STORE_OP_STORE are effectively free. However, before this gets used on
* a tiling GPU, we should really hook up preserve attachments and use them
* to determine when we can use LOAD/STORE_OP_DONT_CARE between subpasses.
*/
const VkRenderingInfo rendering = {
.sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
.renderArea = cmd_buffer->render_area,
.layerCount = pass->is_multiview ? 1 : framebuffer->layers,
.viewMask = pass->is_multiview ? subpass->view_mask : 0,
.colorAttachmentCount = subpass->color_count,
.pColorAttachments = color_attachments,
.pDepthAttachment = &depth_attachment,
.pStencilAttachment = &stencil_attachment,
};
disp->CmdBeginRendering(vk_command_buffer_to_handle(cmd_buffer),
&rendering);
STACK_ARRAY_FINISH(color_attachments);
}
static void
end_subpass(struct vk_command_buffer *cmd_buffer,
const VkSubpassEndInfo *end_info)
{
struct vk_device_dispatch_table *disp =
&cmd_buffer->base.device->dispatch_table;
disp->CmdEndRendering(vk_command_buffer_to_handle(cmd_buffer));
}
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo *pRenderPassBeginInfo,
const VkSubpassBeginInfo *pSubpassBeginInfo)
{
VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
VK_FROM_HANDLE(vk_render_pass, pass, pRenderPassBeginInfo->renderPass);
VK_FROM_HANDLE(vk_framebuffer, framebuffer,
pRenderPassBeginInfo->framebuffer);
assert(cmd_buffer->render_pass == NULL);
cmd_buffer->render_pass = pass;
cmd_buffer->subpass_idx = 0;
assert(cmd_buffer->framebuffer == NULL);
cmd_buffer->framebuffer = framebuffer;
cmd_buffer->render_area = pRenderPassBeginInfo->renderArea;
assert(cmd_buffer->attachments == NULL);
if (pass->attachment_count > ARRAY_SIZE(cmd_buffer->_attachments)) {
cmd_buffer->attachments = malloc(pass->attachment_count *
sizeof(*cmd_buffer->attachments));
} else {
cmd_buffer->attachments = cmd_buffer->_attachments;
}
const VkRenderPassAttachmentBeginInfo *attach_begin =
vk_find_struct_const(pRenderPassBeginInfo,
RENDER_PASS_ATTACHMENT_BEGIN_INFO);
if (!attach_begin)
assert(pass->attachment_count == framebuffer->attachment_count);
const VkImageView *image_views;
if (attach_begin && attach_begin->attachmentCount != 0) {
assert(attach_begin->attachmentCount == pass->attachment_count);
image_views = attach_begin->pAttachments;
} else {
assert(framebuffer->attachment_count >= pass->attachment_count);
image_views = framebuffer->attachments;
}
for (uint32_t a = 0; a < pass->attachment_count; ++a) {
VK_FROM_HANDLE(vk_image_view, image_view, image_views[a]);
const struct vk_render_pass_attachment *pass_att = &pass->attachments[a];
struct vk_attachment_state *att_state = &cmd_buffer->attachments[a];
/* From the Vulkan 1.3.204 spec:
*
* VUID-VkRenderPassBeginInfo-framebuffer-03216
*
* "If framebuffer was created with a VkFramebufferCreateInfo::flags
* value that included VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, each
* element of the pAttachments member of a
* VkRenderPassAttachmentBeginInfo structure included in the pNext
* chain must be a VkImageView of an image created with a value of
* VkImageViewCreateInfo::format equal to the corresponding value of
* VkAttachmentDescription::format in renderPass"
*/
assert(image_view->format == pass_att->format);
/* From the Vulkan 1.3.204 spec:
*
* VUID-VkRenderPassBeginInfo-framebuffer-03217
*
* "If framebuffer was created with a VkFramebufferCreateInfo::flags
* value that included VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, each
* element of the pAttachments member of a
* VkRenderPassAttachmentBeginInfo structure included in the pNext
* chain must be a VkImageView of an image created with a value of
* VkImageCreateInfo::samples equal to the corresponding value of
* VkAttachmentDescription::samples in renderPass"
*/
assert(image_view->image->samples == pass_att->samples);
assert(util_last_bit(pass_att->view_mask) <= image_view->layer_count);
*att_state = (struct vk_attachment_state) {
.image_view = image_view,
.views_loaded = 0,
};
for (uint32_t v = 0; v < MESA_VK_MAX_MULTIVIEW_VIEW_COUNT; v++) {
att_state->views[v] = (struct vk_attachment_view_state) {
.layout = pass_att->initial_layout,
.stencil_layout = pass_att->initial_stencil_layout,
};
}
if (a < pRenderPassBeginInfo->clearValueCount)
att_state->clear_value = pRenderPassBeginInfo->pClearValues[a];
}
begin_subpass(cmd_buffer, pSubpassBeginInfo);
}
void
vk_command_buffer_reset_render_pass(struct vk_command_buffer *cmd_buffer)
{
cmd_buffer->render_pass = NULL;
cmd_buffer->subpass_idx = 0;
cmd_buffer->framebuffer = NULL;
if (cmd_buffer->attachments != cmd_buffer->_attachments)
free(cmd_buffer->attachments);
cmd_buffer->attachments = NULL;
}
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdNextSubpass2(VkCommandBuffer commandBuffer,
const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo)
{
VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
end_subpass(cmd_buffer, pSubpassEndInfo);
cmd_buffer->subpass_idx++;
begin_subpass(cmd_buffer, pSubpassBeginInfo);
}
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
const VkSubpassEndInfo *pSubpassEndInfo)
{
VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
const struct vk_render_pass *pass = cmd_buffer->render_pass;
struct vk_device_dispatch_table *disp =
&cmd_buffer->base.device->dispatch_table;
end_subpass(cmd_buffer, pSubpassEndInfo);
/* Make sure all our attachments end up in their finalLayout */
uint32_t max_image_barrier_count = 0;
for (uint32_t a = 0; a < pass->attachment_count; a++) {
const struct vk_render_pass_attachment *rp_att = &pass->attachments[a];
max_image_barrier_count += util_bitcount(pass->view_mask) *
util_bitcount(rp_att->aspects);
}
STACK_ARRAY(VkImageMemoryBarrier2, image_barriers, max_image_barrier_count);
uint32_t image_barrier_count = 0;
for (uint32_t a = 0; a < pass->attachment_count; a++) {
const struct vk_render_pass_attachment *rp_att = &pass->attachments[a];
transition_attachment(cmd_buffer, a, pass->view_mask,
rp_att->final_layout,
rp_att->final_stencil_layout,
&image_barrier_count,
max_image_barrier_count,
image_barriers);
}
assert(image_barrier_count <= max_image_barrier_count);
if (image_barrier_count > 0) {
const VkDependencyInfo dependency_info = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.dependencyFlags = 0,
.imageMemoryBarrierCount = image_barrier_count,
.pImageMemoryBarriers = image_barriers,
};
disp->CmdPipelineBarrier2(vk_command_buffer_to_handle(cmd_buffer),
&dependency_info);
}
STACK_ARRAY_FINISH(image_barriers);
vk_command_buffer_reset_render_pass(cmd_buffer);
}


@@ -58,6 +58,14 @@ struct vk_subpass_attachment {
*/
VkImageLayout stencil_layout;
/** A per-view mask of whether this is the last use of this attachment
*
* If the same render pass attachment is used multiple ways within a
* subpass, corresponding last_subpass bits will be set in all of them.
* For the non-multiview case, only the first bit is used.
*/
uint32_t last_subpass;
/** Resolve attachment, if any */
struct vk_subpass_attachment *resolve;
};
@@ -93,7 +101,12 @@ struct vk_subpass {
/** VkSubpassDescriptionDepthStencilResolve::pDepthStencilResolveAttachment */
struct vk_subpass_attachment *depth_stencil_resolve_attachment;
/** VkSubpassDescription2::viewMask */
/** VkSubpassDescription2::viewMask or 1 for non-multiview
*
* For all view masks in the vk_render_pass data structure, we use a mask
* of 1 for non-multiview instead of a mask of 0. To determine if the
* render pass is multiview or not, see vk_render_pass::is_multiview.
*/
uint32_t view_mask;
/** VkSubpassDescriptionDepthStencilResolve::depthResolveMode */
@@ -113,6 +126,12 @@ struct vk_render_pass_attachment {
/** VkAttachmentDescription2::samples */
uint32_t samples;
/** Views in which this attachment is used, 0 for unused
*
* For non-multiview, this will be 1 if the attachment is used.
*/
uint32_t view_mask;
/** VkAttachmentDescription2::loadOp */
VkAttachmentLoadOp load_op;
@@ -177,6 +196,15 @@ struct vk_subpass_dependency {
struct vk_render_pass {
struct vk_object_base base;
/** True if this render pass uses multiview
*
* This is true if all subpasses have viewMask != 0.
*/
bool is_multiview;
/** Views used by this render pass or 1 for non-multiview */
uint32_t view_mask;
/** VkRenderPassCreateInfo2::attachmentCount */
uint32_t attachment_count;