mirror of https://github.com/FEX-Emu/linux.git
fb1c8f93d8
This patch (written by me and also containing many suggestions of Arjan
van de Ven) does a major cleanup of the spinlock code. It does the
following things:

 - consolidates and enhances the spinlock/rwlock debugging code

 - simplifies the asm/spinlock.h files

 - encapsulates the raw spinlock type and moves generic spinlock
   features (such as ->break_lock) into the generic code.

 - cleans up the spinlock code hierarchy to get rid of the spaghetti.

Most notably there's now only a single variant of the debugging code,
located in lib/spinlock_debug.c. (previously we had one SMP debugging
variant per architecture, plus a separate generic one for UP builds)

Also, I've enhanced the rwlock debugging facility: it will now track
write-owners. There is new spinlock-owner/CPU-tracking on SMP builds
too. All locks have lockup detection now, which will work for both soft
and hard spin/rwlock lockups.

The arch-level include files now only contain the minimally necessary
subset of the spinlock code - all the rest that can be generalized now
lives in the generic headers:

 include/asm-i386/spinlock_types.h   | 16
 include/asm-x86_64/spinlock_types.h | 16

I have also split up the various spinlock variants into separate files,
making it easier to see which does what. The new layout is:

   SMP                         |  UP
   ----------------------------|-----------------------------------
   asm/spinlock_types_smp.h    |  linux/spinlock_types_up.h
   linux/spinlock_types.h      |  linux/spinlock_types.h
   asm/spinlock_smp.h          |  linux/spinlock_up.h
   linux/spinlock_api_smp.h    |  linux/spinlock_api_up.h
   linux/spinlock.h            |  linux/spinlock.h

/*
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the __raw_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

All SMP and UP architectures are converted by this patch.

arm, i386, ia64, ppc, ppc64, s390/s390x, x64 were build-tested via
crosscompilers. m32r, mips, sh, sparc have not been tested yet, but
should be mostly fine.

From: Grant Grundler <grundler@parisc-linux.org>

  Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU).
  Builds 32-bit SMP kernel (not booted or tested). I did not try to
  build non-SMP kernels. That should be trivial to fix up later if
  necessary.

  I converted bit ops atomic_hash lock to raw_spinlock_t. Doing so
  avoids some ugly nesting of linux/*.h and asm/*.h files. Those
  particular locks are well tested and contained entirely inside
  arch-specific code. I do NOT expect any new issues to arise with
  them.

  If someone does ever need to use debug/metrics with them, then they
  will need to unravel this hairball between spinlocks, atomic ops, and
  bit ops that exist only because parisc has exactly one atomic
  instruction: LDCW (load and clear word).

From: "Luck, Tony" <tony.luck@intel.com>

  ia64 fix

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjanv@infradead.org>
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Mikael Pettersson <mikpe@csd.uu.se>
Signed-off-by: Benoit Boissinot <benoit.boissinot@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
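
For context, a minimal sketch of the final spin_*() API that
linux/spinlock.h builds, as used by ordinary kernel code of this era
(my_lock and the critical-section comments are placeholders, not part
of the patch):

	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;

	void example(void)
	{
		unsigned long flags;

		spin_lock(&my_lock);			/* plain variant */
		/* ... critical section ... */
		spin_unlock(&my_lock);

		spin_lock_irqsave(&my_lock, flags);	/* IRQ-disabling variant */
		/* ... critical section, local interrupts off ... */
		spin_unlock_irqrestore(&my_lock, flags);
	}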
279 lines
6.2 KiB
C
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/config.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define __raw_spin_is_locked(x)	((x)->lock != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	do { cpu_relax(); } while ((x)->lock)
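
/*
 * Lock word encoding: 0 means unlocked, non-zero means held.  Note that
 * __raw_spin_unlock_wait() only spins until the lock word reads zero;
 * it never acquires the lock itself.
 */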

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 sync						\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	}
}
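
/*
 * Roughly, the acquire loop above does the following (a C sketch only;
 * store_conditional() stands in for a successful ll/sc pair):
 *
 *	do {
 *		while (lock->lock)
 *			;		(spin while the lock reads non-zero)
 *	} while (!store_conditional(&lock->lock, 1));
 *
 * followed by "sync", which acts as the acquire memory barrier.  The
 * R10000_LLSC_WAR path retries the sc with beqzl (branch-likely) to
 * work around an ll/sc erratum on early R10000 processors.
 */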

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
	"	.set	noreorder	# __raw_spin_unlock	\n"
	"	sync						\n"
	"	sw	$0, %0					\n"
	"	.set	reorder					\n"
	: "=m" (lock->lock)
	: "m" (lock->lock)
	: "memory");
}
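
/*
 * Release is just a "sync" barrier followed by a plain word store of
 * zero; no ll/sc loop is needed because an aligned store is atomic.
 */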

static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned int temp, res;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	 nop						\n"
		"	andi	%2, %0, 1				\n"
		"	sync						\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 andi	%2, %0, 1				\n"
		"	sync						\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	}

	return res == 0;
}
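
/*
 * Trylock never spins on a held lock: it unconditionally stores
 * old | 1 back with sc, then tests bit 0 of the old value.  res == 0
 * means the lock was free and is now ours; res == 1 means it was
 * already held (and storing old | 1 changed nothing).
 */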

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
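
/*
 * Lock word encoding: bits 0..30 hold the reader count and bit 31
 * (0x80000000) marks a writer.  Readers spin while the word is negative
 * (bltz: a writer is in), writers spin while it is non-zero (any reader
 * or writer), and "lui %1, 0x8000" builds the writer bit.
 */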

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

/* Note the use of sub, not subu, which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer. */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# __raw_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 sync						\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_unlock	\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}
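
/*
 * MIPS convention in this file: the extra-indented instruction after
 * each branch (li/addu/nop/sync) executes in the branch delay slot,
 * and the beqzl (branch-likely) retry forms appear only on the
 * R10000_LLSC_WAR paths.
 */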

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__(
	"	sync			# __raw_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
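
/*
 * There is no native MIPS read-trylock; the define above falls back to
 * generic__raw_read_trylock() from the arch-independent spinlock code.
 */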

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	sync						\n"
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 sync						\n"
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}
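
/*
 * On the success path %2 (ret) is set to 1 after the sc succeeds; on a
 * contended lock the bnez branches straight to 2: with ret still 0, so
 * the caller sees failure without ever spinning.
 */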

#endif /* _ASM_SPINLOCK_H */