drm/nouveau/vm: take subdev mutex, not the mm, protects against race with vm/nvc0

nvc0_vm_flush() accesses the pgd list, which will soon be able to race
with vm_unlink() during channel destruction.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Author: Ben Skeggs <bskeggs@redhat.com>
Date:   2013-05-13 22:26:26 +10:00
Commit: 4e67bee8e1
Parent: 15cace5917
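
Why this guards vm/nvc0: nvc0_vm_flush() walks vm->pgd_list under the subdev
mutex, so moving base.c's list manipulation under that same mutex is what
serializes flushes against nouveau_vm_link()/nouveau_vm_unlink(). Below is a
rough sketch of the flush side for orientation only; nvc0_vm_flush_sketch is
a made-up name, the real nvc0_vm_flush() differs in detail, and none of this
is part of the diff itself:

static void
nvc0_vm_flush_sketch(struct nouveau_vm *vm)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_vm_pgd *vpgd;

	/* Holding nv_subdev(vmm)->mutex across the list walk is what
	 * makes the list_del() in nouveau_vm_unlink() safe against a
	 * concurrent flush during channel destruction. */
	mutex_lock(&nv_subdev(vmm)->mutex);
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		/* per-PGD TLB flush would be issued here */
	}
	mutex_unlock(&nv_subdev(vmm)->mutex);
}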

@@ -236,9 +236,9 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 			vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
 		}
 
-		mutex_unlock(&vm->mm.mutex);
+		mutex_unlock(&nv_subdev(vmm)->mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm.mutex);
+		mutex_lock(&nv_subdev(vmm)->mutex);
 	}
 }
@@ -256,18 +256,18 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 	pgt_size  = (1 << (vmm->pgt_bits + 12)) >> type;
 	pgt_size *= 8;
 
-	mutex_unlock(&vm->mm.mutex);
+	mutex_unlock(&nv_subdev(vmm)->mutex);
 	ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
 				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
-	mutex_lock(&vm->mm.mutex);
+	mutex_lock(&nv_subdev(vmm)->mutex);
 	if (unlikely(ret))
 		return ret;
 
 	/* someone beat us to filling the PDE while we didn't have the lock */
 	if (unlikely(vpgt->refcount[big]++)) {
-		mutex_unlock(&vm->mm.mutex);
+		mutex_unlock(&nv_subdev(vmm)->mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm.mutex);
+		mutex_lock(&nv_subdev(vmm)->mutex);
 		return 0;
 	}
@@ -289,11 +289,11 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 	u32 fpde, lpde, pde;
 	int ret;
 
-	mutex_lock(&vm->mm.mutex);
+	mutex_lock(&nv_subdev(vmm)->mutex);
 	ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align,
 			      &vma->node);
 	if (unlikely(ret != 0)) {
-		mutex_unlock(&vm->mm.mutex);
+		mutex_unlock(&nv_subdev(vmm)->mutex);
 		return ret;
 	}
@@ -314,11 +314,11 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 			if (pde != fpde)
 				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
 			nouveau_mm_free(&vm->mm, &vma->node);
-			mutex_unlock(&vm->mm.mutex);
+			mutex_unlock(&nv_subdev(vmm)->mutex);
 			return ret;
 		}
 	}
-	mutex_unlock(&vm->mm.mutex);
+	mutex_unlock(&nv_subdev(vmm)->mutex);
 
 	vma->vm     = vm;
 	vma->offset = (u64)vma->node->offset << 12;
@@ -338,10 +338,10 @@ nouveau_vm_put(struct nouveau_vma *vma)
 	fpde = (vma->node->offset >> vmm->pgt_bits);
 	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
 
-	mutex_lock(&vm->mm.mutex);
+	mutex_lock(&nv_subdev(vmm)->mutex);
 	nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
 	nouveau_mm_free(&vm->mm, &vma->node);
-	mutex_unlock(&vm->mm.mutex);
+	mutex_unlock(&nv_subdev(vmm)->mutex);
 }
 
 int
@@ -405,24 +405,25 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 	nouveau_gpuobj_ref(pgd, &vpgd->obj);
 
-	mutex_lock(&vm->mm.mutex);
+	mutex_lock(&nv_subdev(vmm)->mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
 		vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
 	list_add(&vpgd->head, &vm->pgd_list);
-	mutex_unlock(&vm->mm.mutex);
+	mutex_unlock(&nv_subdev(vmm)->mutex);
 	return 0;
 }
 
 static void
 nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
 {
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	struct nouveau_vm_pgd *vpgd, *tmp;
 	struct nouveau_gpuobj *pgd = NULL;
 
 	if (!mpgd)
 		return;
 
-	mutex_lock(&vm->mm.mutex);
+	mutex_lock(&nv_subdev(vmm)->mutex);
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
 		if (vpgd->obj == mpgd) {
 			pgd = vpgd->obj;
@@ -431,7 +432,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
 			break;
 		}
 	}
-	mutex_unlock(&vm->mm.mutex);
+	mutex_unlock(&nv_subdev(vmm)->mutex);
 
 	nouveau_gpuobj_ref(NULL, &pgd);
 }
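
A side note on the unlock/ref/lock dance that several hunks preserve: the
final nouveau_gpuobj_ref(NULL, ...) drops the last reference and can destroy
the object, likely needing locks of its own, so the reference is always
dropped with the subdev mutex released; the in-diff comment about "someone
beat us to filling the PDE" exists precisely because of that unlocked window.
Schematically (a sketch of the pattern, not an additional change):

	mutex_unlock(&nv_subdev(vmm)->mutex);
	nouveau_gpuobj_ref(NULL, &pgt);	/* may free; must not hold the mutex */
	mutex_lock(&nv_subdev(vmm)->mutex);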