diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index dbeb3504c3c8..848f173db257 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -34,10 +34,6 @@ config GENERIC_HARDIRQS
 	bool
 	default y
 
-config RWSEM_GENERIC_SPINLOCK
-	bool
-	default y
-
 source "init/Kconfig"
 
 menu "Processor type and features"
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 5c018c503dfa..89e409e9e0de 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1102,7 +1102,7 @@ ENTRY(fast_syscall_sysxtensa)
 	s32i	a7, a2, PT_AREG7
 
 	movi	a7, 4			# sizeof(unsigned int)
-	verify_area	a3, a7, a0, a2, .Leac
+	access_ok	a0, a3, a7, a2, .Leac
 
 	_beqi	a6, SYSXTENSA_ATOMIC_SET, .Lset
 	_beqi	a6, SYSXTENSA_ATOMIC_EXG_ADD, .Lexg
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index e252b61e45a5..beba497e78df 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -104,7 +104,7 @@ sys_sigaction(int sig, const struct old_sigaction *act,
 	if (act) {
 		old_sigset_t mask;
 
-		if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
 		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
 			return -EFAULT;
@@ -116,7 +116,7 @@ sys_sigaction(int sig, const struct old_sigaction *act,
 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
 
 	if (!ret && oact) {
-		if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
 		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
 			return -EFAULT;
@@ -236,7 +236,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
 	err |= __copy_from_user (regs->areg, sc->sc_areg, XCHAL_NUM_AREGS*4);
 	err |= __get_user(buf, &sc->sc_cpstate);
 	if (buf) {
-		if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
+		if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
 			goto badframe;
 		err |= restore_cpextra(buf);
 	}
@@ -357,7 +357,7 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
 	if (regs->depc > 64)
 		panic ("Double exception sys_sigreturn\n");
 
-	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
 
 	if (__get_user(set.sig[0], &frame->sc.oldmask)
@@ -394,7 +394,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
 		return 0;
 	}
 
-	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
 
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
diff --git a/include/asm-xtensa/checksum.h b/include/asm-xtensa/checksum.h
index bdc00ae9be48..03114f8d1e18 100644
--- a/include/asm-xtensa/checksum.h
+++ b/include/asm-xtensa/checksum.h
@@ -43,8 +43,7 @@ asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, i
  * Note: when you get a NULL pointer exception here this means someone
  * passed in an incorrect kernel address to one of these functions.
  *
- * If you use these functions directly please don't forget the
- * verify_area().
+ * If you use these functions directly please don't forget the access_ok().
  */
 static inline
 unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
diff --git a/include/asm-xtensa/rwsem.h b/include/asm-xtensa/rwsem.h
index 3c02b0e033f0..abcd86dc5ab9 100644
--- a/include/asm-xtensa/rwsem.h
+++ b/include/asm-xtensa/rwsem.h
@@ -172,4 +172,9 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 	return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
-#endif	/* _XTENSA_RWSEM_XADD_H */
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+	return (sem->count != 0);
+}
+
+#endif	/* _XTENSA_RWSEM_H */
diff --git a/include/asm-xtensa/uaccess.h b/include/asm-xtensa/uaccess.h
index 06a22b83ba17..88a64e1144d5 100644
--- a/include/asm-xtensa/uaccess.h
+++ b/include/asm-xtensa/uaccess.h
@@ -154,35 +154,6 @@
 .Laccess_ok_\@:
 	.endm
 
-/*
- * verify_area determines whether a memory access is allowed.  It's
- * mostly an unnecessary wrapper for access_ok, but we provide it as a
- * duplicate of the verify_area() C inline function below.  See the
- * equivalent C version below for clarity.
- *
- * On error, verify_area branches to a label indicated by parameter
- * <error>.  This implies that the macro falls through to the next
- * instruction on success.
- *
- * Note that we assume success is the common case, and we optimize the
- * branch fall-through case on success.
- *
- * On Entry:
- *	<aa>	register containing memory address
- *	<as>	register containing memory size
- *	<at>	temp register
- *	<error>	label to branch to on error; implies fall-through
- *		macro on success
- * On Exit:
- *	<aa>	preserved
- *	<as>	preserved
- *	<at>	destroyed
- */
-	.macro	verify_area	aa, as, at, sp, error
-	access_ok  \at, \aa, \as, \sp, \error
-	.endm
-
-
 #else /* __ASSEMBLY__ not defined */
 
 #include <linux/sched.h>
@@ -211,11 +182,6 @@
 #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
 #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
 
-static inline int verify_area(int type, const void * addr, unsigned long size)
-{
-	return access_ok(type,addr,size) ? 0 : -EFAULT;
-}
-
 /*
  * These are the main single-value transfer routines.  They
  * automatically use the right size if we just have the right pointer
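Note on the conversion pattern: the removed verify_area() inline returned 0 on success and -EFAULT on failure, while access_ok() returns nonzero when the range is accessible, which is why every converted call site gains a leading "!". A minimal sketch of the idiom (uptr and len are illustrative names, not taken from the patch):

	/* old: verify_area() == 0 means the user range is accessible */
	if (verify_area(VERIFY_READ, uptr, len))
		return -EFAULT;

	/* new: access_ok() != 0 means the user range is accessible,
	 * so the test is inverted when converting */
	if (!access_ok(VERIFY_READ, uptr, len))
		return -EFAULT;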