[POWERPC] spufs: move fault, lscsa_alloc and switch code to spufs module
Currently, part of the spufs code (switch.o, lscsa_alloc.o and fault.o) is compiled directly into the kernel. This change moves these components out of the kernel and into the spufs module.

The lscsa and switch objects are fairly straightforward to move. For fault.o, we split the fault-handling code into two parts: arch/powerpc/platforms/cell/spu_fault.c and arch/powerpc/platforms/cell/spufs/fault.c. The former keeps the in-kernel spu_handle_mm_fault function, and the rest of the fault-handling code moves into spufs.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
commit  7cd58e4381
parent  9b1d21f858
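To make the split concrete, here is a minimal sketch (not part of the patch) of how module code can now use the helper that stays built into the kernel: spu_handle_mm_fault() lives in spu_fault.c, gets a prototype in asm/spu.h and is exported with EXPORT_SYMBOL_GPL, so the spufs module's fault path can call it. The wrapper name and surrounding logic below are hypothetical simplifications; the real caller is spufs_handle_class1() in spufs/fault.c.

/* Hypothetical, simplified spufs-side caller (sketch only; the real code
 * also restarts the stopped MFC DMA and updates context statistics). */
#include <linux/mm.h>
#include <asm/spu.h>	/* spu_handle_mm_fault() prototype after this patch */

static int example_resolve_spu_fault(struct mm_struct *mm,
				     unsigned long ea, unsigned long dsisr)
{
	unsigned flt = 0;
	int ret;

	/* Resolve the faulting effective address against the owning mm.
	 * The helper stays in the kernel image (spu_fault.c) and is
	 * reachable from this module through EXPORT_SYMBOL_GPL. */
	ret = spu_handle_mm_fault(mm, ea, dsisr, &flt);
	if (ret)
		return ret;	/* -EFAULT or -ENOMEM from the in-kernel helper */

	return 0;
}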
arch/powerpc/platforms/cell/Makefile
@@ -19,7 +19,7 @@ spu-manage-$(CONFIG_PPC_CELLEB)	+= spu_manage.o
 spu-manage-$(CONFIG_PPC_CELL_NATIVE)	+= spu_manage.o
 
 obj-$(CONFIG_SPU_BASE)			+= spu_callbacks.o spu_base.o \
-					   spu_syscalls.o \
+					   spu_syscalls.o spu_fault.o \
 					   $(spu-priv1-y) \
 					   $(spu-manage-y) \
 					   spufs/
arch/powerpc/platforms/cell/spu_fault.c (new file, 98 lines)
@@ -0,0 +1,98 @@
+/*
+ * SPU mm fault handler
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ * Author: Jeremy Kerr <jk@ozlabs.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/spu.h>
+#include <asm/spu_csa.h>
+
+/*
+ * This ought to be kept in sync with the powerpc specific do_page_fault
+ * function. Currently, there are a few corner cases that we haven't had
+ * to handle fortunately.
+ */
+int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+		unsigned long dsisr, unsigned *flt)
+{
+	struct vm_area_struct *vma;
+	unsigned long is_write;
+	int ret;
+
+#if 0
+	if (!IS_VALID_EA(ea)) {
+		return -EFAULT;
+	}
+#endif /* XXX */
+	if (mm == NULL) {
+		return -EFAULT;
+	}
+	if (mm->pgd == NULL) {
+		return -EFAULT;
+	}
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, ea);
+	if (!vma)
+		goto bad_area;
+	if (vma->vm_start <= ea)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+	if (expand_stack(vma, ea))
+		goto bad_area;
+good_area:
+	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
+	if (is_write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	} else {
+		if (dsisr & MFC_DSISR_ACCESS_DENIED)
+			goto bad_area;
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+	ret = 0;
+	*flt = handle_mm_fault(mm, vma, ea, is_write);
+	if (unlikely(*flt & VM_FAULT_ERROR)) {
+		if (*flt & VM_FAULT_OOM) {
+			ret = -ENOMEM;
+			goto bad_area;
+		} else if (*flt & VM_FAULT_SIGBUS) {
+			ret = -EFAULT;
+			goto bad_area;
+		}
+		BUG();
+	}
+	if (*flt & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
+	up_read(&mm->mmap_sem);
+	return ret;
+
+bad_area:
+	up_read(&mm->mmap_sem);
+	return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(spu_handle_mm_fault);
arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,8 +1,8 @@
-obj-y += switch.o fault.o lscsa_alloc.o
-
 obj-$(CONFIG_SPU_FS) += spufs.o
 spufs-y += inode.o file.o context.o syscalls.o coredump.o
 spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
+spufs-y += switch.o fault.o lscsa_alloc.o
+
 
 # Rules to build switch.o with the help of SPU tool chain
 SPU_CROSS	:= spu-
arch/powerpc/platforms/cell/spufs/fault.c
@@ -28,75 +28,6 @@
 
 #include "spufs.h"
 
-/*
- * This ought to be kept in sync with the powerpc specific do_page_fault
- * function. Currently, there are a few corner cases that we haven't had
- * to handle fortunately.
- */
-static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
-		unsigned long dsisr, unsigned *flt)
-{
-	struct vm_area_struct *vma;
-	unsigned long is_write;
-	int ret;
-
-#if 0
-	if (!IS_VALID_EA(ea)) {
-		return -EFAULT;
-	}
-#endif /* XXX */
-	if (mm == NULL) {
-		return -EFAULT;
-	}
-	if (mm->pgd == NULL) {
-		return -EFAULT;
-	}
-
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, ea);
-	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= ea)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, ea))
-		goto bad_area;
-good_area:
-	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
-	if (is_write) {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
-	} else {
-		if (dsisr & MFC_DSISR_ACCESS_DENIED)
-			goto bad_area;
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-			goto bad_area;
-	}
-	ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write);
-	if (unlikely(*flt & VM_FAULT_ERROR)) {
-		if (*flt & VM_FAULT_OOM) {
-			ret = -ENOMEM;
-			goto bad_area;
-		} else if (*flt & VM_FAULT_SIGBUS) {
-			ret = -EFAULT;
-			goto bad_area;
-		}
-		BUG();
-	}
-	if (*flt & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
-	up_read(&mm->mmap_sem);
-	return ret;
-
-bad_area:
-	up_read(&mm->mmap_sem);
-	return -EFAULT;
-}
-
 static void spufs_handle_dma_error(struct spu_context *ctx,
 		unsigned long ea, int type)
 {
@@ -138,7 +69,6 @@ void spufs_dma_callback(struct spu *spu, int type)
 {
 	spufs_handle_dma_error(spu->ctx, spu->dar, type);
 }
-EXPORT_SYMBOL_GPL(spufs_dma_callback);
 
 /*
  * bottom half handler for page faults, we can't do this from
@@ -227,4 +157,3 @@ int spufs_handle_class1(struct spu_context *ctx)
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(spufs_handle_class1);
arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -28,6 +28,8 @@
 #include <asm/spu_csa.h>
 #include <asm/mmu.h>
 
+#include "spufs.h"
+
 static int spu_alloc_lscsa_std(struct spu_state *csa)
 {
 	struct spu_lscsa *lscsa;
arch/powerpc/platforms/cell/spufs/sched.c
@@ -892,6 +892,38 @@ static int spusched_thread(void *unused)
 	return 0;
 }
 
+void spuctx_switch_state(struct spu_context *ctx,
+		enum spu_utilization_state new_state)
+{
+	unsigned long long curtime;
+	signed long long delta;
+	struct timespec ts;
+	struct spu *spu;
+	enum spu_utilization_state old_state;
+
+	ktime_get_ts(&ts);
+	curtime = timespec_to_ns(&ts);
+	delta = curtime - ctx->stats.tstamp;
+
+	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
+	WARN_ON(delta < 0);
+
+	spu = ctx->spu;
+	old_state = ctx->stats.util_state;
+	ctx->stats.util_state = new_state;
+	ctx->stats.tstamp = curtime;
+
+	/*
+	 * Update the physical SPU utilization statistics.
+	 */
+	if (spu) {
+		ctx->stats.times[old_state] += delta;
+		spu->stats.times[old_state] += delta;
+		spu->stats.util_state = new_state;
+		spu->stats.tstamp = curtime;
+	}
+}
+
 #define LOAD_INT(x) ((x) >> FSHIFT)
 #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
 
arch/powerpc/platforms/cell/spufs/spufs.h
@@ -307,41 +307,16 @@ struct spufs_coredump_reader {
 extern struct spufs_coredump_reader spufs_coredump_read[];
 extern int spufs_coredump_num_notes;
 
-/*
- * This function is a little bit too large for an inline, but
- * as fault.c is built into the kernel we can't move it out of
- * line.
- */
-static inline void spuctx_switch_state(struct spu_context *ctx,
-		enum spu_utilization_state new_state)
-{
-	unsigned long long curtime;
-	signed long long delta;
-	struct timespec ts;
-	struct spu *spu;
-	enum spu_utilization_state old_state;
-
-	ktime_get_ts(&ts);
-	curtime = timespec_to_ns(&ts);
-	delta = curtime - ctx->stats.tstamp;
-
-	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
-	WARN_ON(delta < 0);
-
-	spu = ctx->spu;
-	old_state = ctx->stats.util_state;
-	ctx->stats.util_state = new_state;
-	ctx->stats.tstamp = curtime;
-
-	/*
-	 * Update the physical SPU utilization statistics.
-	 */
-	if (spu) {
-		ctx->stats.times[old_state] += delta;
-		spu->stats.times[old_state] += delta;
-		spu->stats.util_state = new_state;
-		spu->stats.tstamp = curtime;
-	}
-}
+extern int spu_init_csa(struct spu_state *csa);
+extern void spu_fini_csa(struct spu_state *csa);
+extern int spu_save(struct spu_state *prev, struct spu *spu);
+extern int spu_restore(struct spu_state *new, struct spu *spu);
+extern int spu_switch(struct spu_state *prev, struct spu_state *new,
+		      struct spu *spu);
+extern int spu_alloc_lscsa(struct spu_state *csa);
+extern void spu_free_lscsa(struct spu_state *csa);
+
+extern void spuctx_switch_state(struct spu_context *ctx,
+		enum spu_utilization_state new_state);
 
 #endif
arch/powerpc/platforms/cell/spufs/switch.c
@@ -48,6 +48,8 @@
 #include <asm/spu_csa.h>
 #include <asm/mmu_context.h>
 
+#include "spufs.h"
+
 #include "spu_save_dump.h"
 #include "spu_restore_dump.h"
 
@@ -2187,10 +2189,8 @@ int spu_init_csa(struct spu_state *csa)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(spu_init_csa);
 
 void spu_fini_csa(struct spu_state *csa)
 {
 	spu_free_lscsa(csa);
 }
-EXPORT_SYMBOL_GPL(spu_fini_csa);
include/asm-powerpc/spu.h
@@ -283,6 +283,8 @@ void spu_remove_sysdev_attr(struct sysdev_attribute *attr);
 int spu_add_sysdev_attr_group(struct attribute_group *attrs);
 void spu_remove_sysdev_attr_group(struct attribute_group *attrs);
 
+int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+		unsigned long dsisr, unsigned *flt);
 
 /*
  * Notifier blocks:
include/asm-powerpc/spu_csa.h
@@ -259,15 +259,6 @@ struct spu_state {
 	spinlock_t register_lock;
 };
 
-extern int spu_init_csa(struct spu_state *csa);
-extern void spu_fini_csa(struct spu_state *csa);
-extern int spu_save(struct spu_state *prev, struct spu *spu);
-extern int spu_restore(struct spu_state *new, struct spu *spu);
-extern int spu_switch(struct spu_state *prev, struct spu_state *new,
-		      struct spu *spu);
-extern int spu_alloc_lscsa(struct spu_state *csa);
-extern void spu_free_lscsa(struct spu_state *csa);
-
 #endif /* !__SPU__ */
 #endif /* __KERNEL__ */
 #endif /* !__ASSEMBLY__ */