zink: rework cached fbfetch descriptor fallback

this ended up being a little trickier than I thought; lazy
descriptors don't use dynamic UBO types for the push set,
which means drivers that (correctly) assert dynamic offset existence
explode because the descriptor template will never work with the
push set

the better, though slightly more annoying, option here is to use the
lazy manager's faster descriptor allocation and lower complexity to
quickly grab a push set, then tweak the existing cached codepath slightly
in order to update a raw VkDescriptorSet

Fixes: 417477f60ed ("zink: always use lazy (non-push) updating for fbfetch descriptors")

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13677>
This commit is contained in:
Mike Blumenkrantz 2021-11-04 11:24:05 -04:00 committed by Marge Bot
parent 2d1f5e3dcb
commit 8c37cd8860
3 changed files with 29 additions and 34 deletions

View File

@ -1207,7 +1207,7 @@ desc_set_descriptor_surface_add(struct zink_context *ctx, struct zink_descriptor
}
static unsigned
init_write_descriptor(struct zink_shader *shader, struct zink_descriptor_set *zds, enum zink_descriptor_type type, int idx, VkWriteDescriptorSet *wd, unsigned num_wds)
init_write_descriptor(struct zink_shader *shader, VkDescriptorSet desc_set, enum zink_descriptor_type type, int idx, VkWriteDescriptorSet *wd, unsigned num_wds)
{
wd->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
wd->pNext = NULL;
@ -1216,12 +1216,13 @@ init_write_descriptor(struct zink_shader *shader, struct zink_descriptor_set *zd
wd->descriptorCount = shader ? shader->bindings[type][idx].size : 1;
wd->descriptorType = shader ? shader->bindings[type][idx].type :
idx == ZINK_FBFETCH_BINDING ? VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
wd->dstSet = zds->desc_set;
wd->dstSet = desc_set;
return num_wds + 1;
}
static unsigned
update_push_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
VkDescriptorSet desc_set,
bool is_compute, bool cache_hit, uint32_t *dynamic_offsets)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
@ -1253,13 +1254,15 @@ update_push_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set
const bool used = (pg->dd->push_usage & BITFIELD_BIT(pstage)) == BITFIELD_BIT(pstage);
dynamic_offsets[dynamic_idx] = used ? info->offset : 0;
if (!cache_hit) {
init_write_descriptor(NULL, zds, ZINK_DESCRIPTOR_TYPE_UBO, tgsi_processor_to_shader_stage(pstage), &wds[i], 0);
init_write_descriptor(NULL, desc_set, ZINK_DESCRIPTOR_TYPE_UBO, tgsi_processor_to_shader_stage(pstage), &wds[i], 0);
if (used) {
desc_set_res_add(zds, ctx->di.descriptor_res[ZINK_DESCRIPTOR_TYPE_UBO][pstage][0], i, cache_hit);
if (zds)
desc_set_res_add(zds, ctx->di.descriptor_res[ZINK_DESCRIPTOR_TYPE_UBO][pstage][0], i, cache_hit);
buffer_infos[i].buffer = info->buffer;
buffer_infos[i].range = info->range;
} else {
desc_set_res_add(zds, NULL, i, cache_hit);
if (zds)
desc_set_res_add(zds, NULL, i, cache_hit);
if (unlikely(!screen->info.rb2_feats.nullDescriptor))
buffer_infos[i].buffer = zink_resource(ctx->dummy_vertex_buffer)->obj->buffer;
else
@ -1272,10 +1275,7 @@ update_push_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set
}
}
if (unlikely(!cache_hit && !is_compute && ctx->dd->has_fbfetch)) {
assert(!pg->dd->fbfetch);
struct zink_resource *res = zink_resource(ctx->dummy_surface[0]->texture);
init_write_descriptor(NULL, zds, 0, MESA_SHADER_STAGES, &wds[ZINK_SHADER_COUNT], 0);
desc_set_res_add(zds, res, ZINK_SHADER_COUNT, cache_hit);
init_write_descriptor(NULL, desc_set, 0, MESA_SHADER_STAGES, &wds[ZINK_SHADER_COUNT], 0);
wds[ZINK_SHADER_COUNT].pImageInfo = &ctx->di.fbfetch;
fbfetch = true;
}
@ -1390,7 +1390,7 @@ update_descriptors_internal(struct zink_context *ctx, enum zink_descriptor_type
default:
unreachable("unknown descriptor type");
}
num_wds = init_write_descriptor(shader, zds, type, j, &wds[num_wds], num_wds);
num_wds = init_write_descriptor(shader, zds->desc_set, type, j, &wds[num_wds], num_wds);
}
}
if (num_wds)
@ -1409,32 +1409,35 @@ zink_descriptors_update(struct zink_context *ctx, bool is_compute)
zink_context_update_descriptor_states(ctx, pg);
bool cache_hit;
VkDescriptorSet desc_set;
struct zink_descriptor_set *zds;
VkDescriptorSet desc_set = VK_NULL_HANDLE;
struct zink_descriptor_set *zds = NULL;
struct zink_batch *batch = &ctx->batch;
VkPipelineBindPoint bp = is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS;
if (unlikely(pg->dd->fbfetch)) {
/* this is not cacheable */
zink_descriptors_update_lazy_push(ctx);
} else {
{
uint32_t dynamic_offsets[PIPE_MAX_CONSTANT_BUFFERS];
unsigned dynamic_offset_idx = 0;
/* push set is indexed in vulkan as 0 but isn't in the general pool array */
ctx->dd->changed[is_compute][ZINK_DESCRIPTOR_TYPES] |= ctx->dd->pg[is_compute] != pg;
if (pg->dd->push_usage) {
zds = zink_descriptor_set_get(ctx, ZINK_DESCRIPTOR_TYPES, is_compute, &cache_hit);
if (pg->dd->fbfetch) {
/* fbfetch is not cacheable: grab a lazy set because it's faster */
desc_set = zink_descriptors_alloc_lazy_push(ctx);
} else {
zds = zink_descriptor_set_get(ctx, ZINK_DESCRIPTOR_TYPES, is_compute, &cache_hit);
desc_set = zds ? zds->desc_set : VK_NULL_HANDLE;
}
} else {
zds = NULL;
cache_hit = false;
}
ctx->dd->changed[is_compute][ZINK_DESCRIPTOR_TYPES] = false;
desc_set = zds ? zds->desc_set : ctx->dd->dummy_set;
if (!desc_set)
desc_set = ctx->dd->dummy_set;
if (pg->dd->push_usage) // push set
dynamic_offset_idx = update_push_ubo_descriptors(ctx, zds,
dynamic_offset_idx = update_push_ubo_descriptors(ctx, zds, desc_set,
is_compute, cache_hit, dynamic_offsets);
VKCTX(CmdBindDescriptorSets)(batch->state->cmdbuf, bp,
pg->layout, 0, 1, &desc_set,

View File

@ -300,8 +300,8 @@ void
zink_descriptor_set_update_lazy(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, VkDescriptorSet set);
void
zink_descriptors_update_lazy_masked(struct zink_context *ctx, bool is_compute, uint8_t changed_sets, uint8_t bind_sets);
void
zink_descriptors_update_lazy_push(struct zink_context *ctx);
VkDescriptorSet
zink_descriptors_alloc_lazy_push(struct zink_context *ctx);
#ifdef __cplusplus
}
#endif

View File

@ -526,12 +526,11 @@ zink_descriptors_update_lazy_masked(struct zink_context *ctx, bool is_compute, u
}
/* only called by cached manager for fbfetch handling */
void
zink_descriptors_update_lazy_push(struct zink_context *ctx)
VkDescriptorSet
zink_descriptors_alloc_lazy_push(struct zink_context *ctx)
{
struct zink_batch_state *bs = ctx->batch.state;
struct zink_batch_descriptor_data_lazy *bdd = bdd_lazy(bs);
struct zink_program *pg = &ctx->curr_program->base;
struct zink_screen *screen = zink_screen(ctx->base.screen);
VkDescriptorSet push_set = VK_NULL_HANDLE;
if (!bdd->push_pool[0]) {
@ -540,16 +539,9 @@ zink_descriptors_update_lazy_push(struct zink_context *ctx)
}
struct zink_descriptor_pool *pool = check_push_pool_alloc(ctx, bdd->push_pool[0], bdd, false);
push_set = get_descriptor_set_lazy(pool);
if (!push_set) {
if (!push_set)
mesa_loge("ZINK: failed to get push descriptor set!");
/* just jam something in to avoid a hang */
push_set = ctx->dd->dummy_set;
}
VKCTX(UpdateDescriptorSetWithTemplate)(screen->dev, push_set, pg->dd->push_template, ctx);
VKCTX(CmdBindDescriptorSets)(bs->cmdbuf,
VK_PIPELINE_BIND_POINT_GRAPHICS,
pg->layout, 0, 1, push_set ? &push_set : &bdd->sets[0][0],
0, NULL);
return push_set;
}
void