From 470db8b78186efe840b6452c6c4934178058059e Mon Sep 17 00:00:00 2001
From: Ben Skeggs
Date: Tue, 8 May 2018 20:39:47 +1000
Subject: [PATCH] drm/nouveau/gem: tie deferred unmapping of buffers to VMA
 fence completion

As VMAs are per-client, unlike buffers, this allows us to avoid
referencing foreign fences (those that belong to another client/driver)
from the client deferred work handler, and prevent some not-fun race
conditions that can be triggered when a fence stalls.

Signed-off-by: Ben Skeggs
---
 drivers/gpu/drm/nouveau/nouveau_gem.c | 17 ++---------------
 1 file changed, 2 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 2016d9eb338e..300daee74209 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -115,25 +115,12 @@ nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
 static void
 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 {
-	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
-	struct reservation_object *resv = nvbo->bo.resv;
-	struct reservation_object_list *fobj;
+	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
 	struct nouveau_gem_object_unmap *work;
-	struct dma_fence *fence = NULL;
-
-	fobj = reservation_object_get_list(resv);
 
 	list_del_init(&vma->head);
 
-	if (fobj && fobj->shared_count > 1)
-		ttm_bo_wait(&nvbo->bo, false, false);
-	else if (fobj && fobj->shared_count == 1)
-		fence = rcu_dereference_protected(fobj->shared[0],
-						  reservation_object_held(resv));
-	else
-		fence = reservation_object_get_excl(nvbo->bo.resv);
-
-	if (!fence || !mapped) {
+	if (!fence) {
 		nouveau_gem_object_delete(vma);
 		return;
 	}