[POWERPC] spufs: block fault handlers in spu_acquire_runnable
This change disables the logic that faults in spu contexts under the covers from the page fault handler. When a fault requires a runnable context, the handler will block until the context is scheduled by other means.

Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 7cd58e4381
commit 33bfd7a738
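The mechanism is a plain wait/wake pairing: the fault handler now sleeps on the new ctx->run_wq wait queue until the context reaches SPU_STATE_RUNNABLE, and spu_activate() wakes all waiters once the context has been bound to an SPU. The sketch below is a minimal userspace analogue of that pattern using POSIX threads; it is illustrative only, and every name in it (struct context, wait_until_runnable, activate) is invented rather than part of the spufs code in the diff. pthread_cond_wait stands in for the spufs_wait(ctx->run_wq, ...) call and pthread_cond_broadcast for the wake_up_all(&ctx->run_wq) added in spu_activate().

/* Userspace sketch of the wait-until-runnable pattern this commit introduces.
 * Names and types are illustrative only, not the spufs API. Build: cc -pthread. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum ctx_state { STATE_SAVED, STATE_RUNNABLE };

struct context {
        pthread_mutex_t lock;    /* plays the role of the context lock */
        pthread_cond_t  run_wq;  /* plays the role of ctx->run_wq */
        enum ctx_state  state;
};

/* Fault-handler side: block until the scheduler marks the context runnable. */
static void wait_until_runnable(struct context *ctx)
{
        pthread_mutex_lock(&ctx->lock);
        while (ctx->state != STATE_RUNNABLE)
                pthread_cond_wait(&ctx->run_wq, &ctx->lock);
        pthread_mutex_unlock(&ctx->lock);
}

/* Scheduler side: bind the context, then wake everyone sleeping on run_wq. */
static void *activate(void *arg)
{
        struct context *ctx = arg;

        sleep(1);                              /* pretend scheduling takes a while */
        pthread_mutex_lock(&ctx->lock);
        ctx->state = STATE_RUNNABLE;
        pthread_cond_broadcast(&ctx->run_wq);  /* analogous to wake_up_all() */
        pthread_mutex_unlock(&ctx->lock);
        return NULL;
}

int main(void)
{
        struct context ctx = {
                .lock   = PTHREAD_MUTEX_INITIALIZER,
                .run_wq = PTHREAD_COND_INITIALIZER,
                .state  = STATE_SAVED,
        };
        pthread_t scheduler;

        pthread_create(&scheduler, NULL, activate, &ctx);
        wait_until_runnable(&ctx);             /* blocks until activate() runs */
        printf("context is runnable\n");
        pthread_join(scheduler, NULL);
        return 0;
}

The waiter blocks in wait_until_runnable() until the scheduler thread flips the state and broadcasts, which mirrors how spufs_ps_nopfn now waits for spu_activate() to run. The commit's diff follows.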
@@ -52,6 +52,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
         init_waitqueue_head(&ctx->wbox_wq);
         init_waitqueue_head(&ctx->stop_wq);
         init_waitqueue_head(&ctx->mfc_wq);
+        init_waitqueue_head(&ctx->run_wq);
         ctx->state = SPU_STATE_SAVED;
         ctx->ops = &spu_backing_ops;
         ctx->owner = get_task_mm(current);
@@ -236,21 +236,31 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 {
         struct spu_context *ctx = vma->vm_file->private_data;
         unsigned long area, offset = address - vma->vm_start;
-        int ret;
 
         offset += vma->vm_pgoff << PAGE_SHIFT;
         if (offset >= ps_size)
                 return NOPFN_SIGBUS;
 
-        /* error here usually means a signal.. we might want to test
-         * the error code more precisely though
+        /*
+         * We have to wait for context to be loaded before we have
+         * pages to hand out to the user, but we don't want to wait
+         * with the mmap_sem held.
+         * It is possible to drop the mmap_sem here, but then we need
+         * to return NOPFN_REFAULT because the mappings may have
+         * changed.
         */
-        ret = spu_acquire_runnable(ctx, 0);
-        if (ret)
-                return NOPFN_REFAULT;
+        spu_acquire(ctx);
+        if (ctx->state == SPU_STATE_SAVED) {
+                up_read(&current->mm->mmap_sem);
+                spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
+                down_read(&current->mm->mmap_sem);
+                goto out;
+        }
 
         area = ctx->spu->problem_phys + ps_offs;
         vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+out:
         spu_release(ctx);
 
         return NOPFN_REFAULT;
@@ -1505,7 +1515,8 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
         if (ret)
                 goto out;
 
-        ret = spu_acquire_runnable(ctx, 0);
+        spu_acquire(ctx);
+        ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
         if (ret)
                 goto out;
 
@@ -682,6 +682,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
                         spu_bind_context(spu, ctx);
                         cbe_spu_info[node].nr_active++;
                         mutex_unlock(&cbe_spu_info[node].list_mutex);
+                        wake_up_all(&ctx->run_wq);
                         return 0;
                 }
 
@@ -71,6 +71,7 @@ struct spu_context {
         wait_queue_head_t wbox_wq;
         wait_queue_head_t stop_wq;
         wait_queue_head_t mfc_wq;
+        wait_queue_head_t run_wq;
         struct fasync_struct *ibox_fasync;
         struct fasync_struct *wbox_fasync;
         struct fasync_struct *mfc_fasync;
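One more detail from the new comment in spufs_ps_nopfn is worth spelling out: because the handler releases mmap_sem while it sleeps, the mapping it was resolving may have changed by the time it wakes, so it simply returns NOPFN_REFAULT and lets the fault be retried from scratch. The sketch below shows that retry contract from the caller's side; handle_ps_fault() and the fault_status values are invented stand-ins, not the kernel fault path, under the assumption that a handler may report "retry" any time it had to drop its locks.

/* Illustrative only: a caller-side retry contract in the spirit of
 * NOPFN_REFAULT. handle_ps_fault() is a stand-in, not the kernel API. */
#include <stdbool.h>
#include <stdio.h>

enum fault_status { FAULT_MAPPED, FAULT_RETRY, FAULT_SIGBUS };

/* Pretend handler: the first call "sleeps" waiting for the context and
 * reports FAULT_RETRY, because its view of the mappings may be stale. */
static enum fault_status handle_ps_fault(unsigned long addr, bool *runnable)
{
        if (!*runnable) {
                *runnable = true;   /* the context got scheduled meanwhile */
                return FAULT_RETRY; /* caller must re-walk the mappings */
        }
        printf("mapped page for address %#lx\n", addr);
        return FAULT_MAPPED;
}

int main(void)
{
        bool runnable = false;
        enum fault_status st;

        /* The fault path simply retries while the handler asks for it. */
        do {
                st = handle_ps_fault(0x1000UL, &runnable);
        } while (st == FAULT_RETRY);

        return st == FAULT_MAPPED ? 0 : 1;
}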