#ifndef _X86_SPINLOCK_H_
#define _X86_SPINLOCK_H_

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>

/*
 * x86: FIFO ticket spinlocks
 *
 * Introduce ticket lock spinlocks for x86 which are FIFO. The implementation
 * is described in the comments below. The straight-line lock/unlock
 * instruction sequence is slightly slower than the dec-based locks on modern
 * x86 CPUs, but the difference is quite small on Core2 and Opteron when
 * working out of cache, and becomes almost insignificant even on P4 when the
 * lock misses cache. trylock is more noticeably slower, but trylocks are
 * relatively rare.
 *
 * On an 8-core (2-socket) Opteron, spinlock unfairness is extremely
 * noticeable: in a userspace test, per-thread runtime differed by up to 2x,
 * and some threads were starved or "unfairly" granted the lock up to
 * 1,000,000 (!) times. After this change, all threads appear to finish at
 * essentially the same time.
 *
 * The memory ordering of the lock conforms to x86 standards, and the
 * implementation has been reviewed by Intel and AMD engineers.
 *
 * The algorithm also tells us how many CPUs are contending the lock, so
 * lockbreak becomes trivial and we no longer have to waste 4 bytes per
 * spinlock for it.
 *
 * After this, we can no longer spin on any lock with preemption enabled and
 * cannot re-enable interrupts while spinning on an irq-safe lock, because at
 * that point we have already taken a ticket and we would deadlock if the
 * same CPU tried to take the lock again. These practices were questionable
 * anyway: if the lock happens to be taken in a preempt- or interrupt-disabled
 * section, it has the same latency problems. The real fix is to keep critical
 * sections short and ensure locks are reasonably fair (which this change
 * does).
 *
 * Signed-off-by: Nick Piggin <npiggin@suse.de>
 * Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 * Signed-off-by: Ingo Molnar <mingo@elte.hu>
 */

#include <linux/compiler.h>

/*
 * x86/paravirt: add hooks for spinlock operations
 *
 * Ticket spinlocks have absolutely ghastly worst-case performance
 * characteristics in a virtual environment. If there is any contention for
 * physical CPUs (i.e. there are more runnable vcpus than cpus), ticket locks
 * can cause the system to spend 90+% of its time spinning.
 *
 * The problem is that (v)cpus waiting on a ticket spinlock are granted the
 * lock in the strict order in which they took their tickets. If the
 * hypervisor scheduler doesn't give the vcpus time in that order, they burn
 * timeslices waiting for the scheduler to give the right vcpu some time. In
 * the worst case it could take O(n^2) vcpu scheduler timeslices for everyone
 * waiting on the lock to get it, not counting new cpus trying to take the
 * lock while the log-jam is sorted out.
 *
 * These hooks allow a paravirt backend to replace the spinlock
 * implementation. At the very least, it could revert to the old lock
 * algorithm, which lets the next scheduled vcpu take the lock and has
 * reasonably good performance. It also allows the spinlocks to take
 * advantage of hypervisor features to make locks more efficient (spin and
 * block, for example).
 *
 * The cost to native execution is an extra direct call when using a spinlock
 * function. There is no overhead if CONFIG_PARAVIRT is turned off.
 *
 * The lock structure is fixed at a single "unsigned int", initialized to
 * zero, but the spinlock implementation can use it as it wishes.
 *
 * Thanks to Thomas Friebel's Xen Summit talk "Preventing Guests from
 * Spinning Around" for pointing out this problem.
 *
 * Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 * Signed-off-by: Ingo Molnar <mingo@elte.hu>
 */
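
/*
 * Illustrative only (not the real paravirt interface): the hooks work by
 * routing each lock operation through a table of function pointers, so a
 * hypervisor backend can substitute its own implementation while the native
 * defaults simply point at the ticket-lock functions. The names below
 * ("example_lock_ops", "example_spin_lock") are hypothetical; see
 * <asm/paravirt.h> for the actual definitions.
 */
#if 0
struct example_lock_ops {
        void (*spin_lock)(raw_spinlock_t *lock);
        int  (*spin_trylock)(raw_spinlock_t *lock);
        void (*spin_unlock)(raw_spinlock_t *lock);
};

/* Native default: point straight at the ticket-lock implementation. */
static struct example_lock_ops example_lock_ops = {
        .spin_lock    = __ticket_spin_lock,
        .spin_trylock = __ticket_spin_trylock,
        .spin_unlock  = __ticket_spin_unlock,
};

/* Each wrapper costs one extra direct call when running natively. */
static __always_inline void example_spin_lock(raw_spinlock_t *lock)
{
        example_lock_ops.spin_lock(lock);
}
#endif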

#include <asm/paravirt.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
        (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
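
/*
 * Illustrative only: a plain-C sketch of the NR_CPUS < 256 fast path that the
 * asm below implements. It assumes the low byte of lock->slock is the "head"
 * (current owner's ticket) and the high byte is the "tail" (next free
 * ticket), and uses a GCC __sync builtin in place of the hand-written xadd.
 */
#if 0
static __always_inline void __ticket_spin_lock_sketch(raw_spinlock_t *lock)
{
        /* Atomically bump the tail; the old value tells us our ticket. */
        unsigned short old = __sync_fetch_and_add((unsigned short *)&lock->slock,
                                                  0x0100);
        unsigned char my_ticket = old >> 8;

        /* Spin until the head (owner byte) reaches our ticket. */
        while ((unsigned char)ACCESS_ONCE(lock->slock) != my_ticket)
                cpu_relax();
}

static __always_inline void __ticket_spin_unlock_sketch(raw_spinlock_t *lock)
{
        /* Release: hand the lock to the next ticket by bumping the head. */
        (*(volatile unsigned char *)&lock->slock)++;
}
#endif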

#if (NR_CPUS < 256)
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
        int tmp = ACCESS_ONCE(lock->slock);

        return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
        int tmp = ACCESS_ONCE(lock->slock);

        return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
        short inc = 0x0100;

        asm volatile (
                LOCK_PREFIX "xaddw %w0, %1\n"
                "1:\t"
                "cmpb %h0, %b0\n\t"
                "je 2f\n\t"
                "rep ; nop\n\t"
                "movb %1, %b0\n\t"
                /* don't need lfence here, because loads are in-order */
                "jmp 1b\n"
                "2:"
                : "+Q" (inc), "+m" (lock->slock)
                :
                : "memory", "cc");
}
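
/*
 * Trylock: a single cmpxchg both checks that the lock looks free and takes a
 * ticket. The lock word is loaded into %ax; if the head (%al) and tail (%ah)
 * differ, the lock is held and we bail out with ZF clear. Otherwise we build
 * a copy with the tail incremented and cmpxchg it in; ZF from the cmpxchg
 * (or from the failed compare) becomes the 0/1 return value via sete.
 */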
static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
        int tmp;
        short new;

        asm volatile("movw %2,%w0\n\t"
                     "cmpb %h0,%b0\n\t"
                     "jne 1f\n\t"
                     "movw %w0,%w1\n\t"
                     "incb %h1\n\t"
                     "lock ; cmpxchgw %w1,%2\n\t"
                     "1:"
                     "sete %b1\n\t"
                     "movzbl %b1,%0\n\t"
                     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
                     :
                     : "memory", "cc");

        return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
        asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
                     : "+m" (lock->slock)
                     :
                     : "memory", "cc");
}
#else
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
        int tmp = ACCESS_ONCE(lock->slock);

        return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
        int tmp = ACCESS_ONCE(lock->slock);

        return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
}

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
        int inc = 0x00010000;
        int tmp;

        asm volatile("lock ; xaddl %0, %1\n"
                     "movzwl %w0, %2\n\t"
                     "shrl $16, %0\n\t"
                     "1:\t"
                     "cmpl %0, %2\n\t"
                     "je 2f\n\t"
                     "rep ; nop\n\t"
                     "movzwl %1, %2\n\t"
                     /* don't need lfence here, because loads are in-order */
                     "jmp 1b\n"
                     "2:"
                     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
                     :
                     : "memory", "cc");
}
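
/*
 * Same idea as the 8-bit trylock, using a rotate trick: a copy of the lock
 * word rotated by 16 bits equals the original only when head == tail, i.e.
 * when the lock is free. The rotated value conveniently sits in %eax as the
 * expected old value for the cmpxchg that bumps the tail by 0x00010000.
 */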
static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
        int tmp;
        int new;

        asm volatile("movl %2,%0\n\t"
                     "movl %0,%1\n\t"
                     "roll $16, %0\n\t"
                     "cmpl %0,%1\n\t"
                     "jne 1f\n\t"
                     "addl $0x00010000, %1\n\t"
                     "lock ; cmpxchgl %1,%2\n\t"
                     "1:"
                     "sete %b1\n\t"
                     "movzbl %b1,%0\n\t"
                     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
                     :
                     : "memory", "cc");

        return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
        asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
                     : "+m" (lock->slock)
                     :
                     : "memory", "cc");
}
#endif

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

#ifdef CONFIG_PARAVIRT
/*
 * Define virtualization-friendly old-style lock byte lock, for use in
 * pv_lock_ops if desired.
 *
 * This differs from the pre-2.6.24 spinlock by always using xchgb
 * rather than decb to take the lock; this allows it to use a
 * zero-initialized lock structure. It also maintains a 1-byte
 * contention counter, so that we can implement
 * __byte_spin_is_contended.
 */
struct __byte_spinlock {
        s8 lock;
        s8 spinners;
};

static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        return bl->lock != 0;
}

static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        return bl->spinners != 0;
}
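
/*
 * Lock: try to xchg a 1 into the lock byte; a zero coming back means we got
 * it. On failure, bump the spinners count, spin with rep;nop until the byte
 * is no longer 1, then drop the count and retry the xchg.
 */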
static inline void __byte_spin_lock(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        s8 val = 1;

        asm("1: xchgb %1, %0\n"
            " test %1,%1\n"
            " jz 3f\n"
            " " LOCK_PREFIX "incb %2\n"
            "2: rep;nop\n"
            " cmpb $1, %0\n"
            " je 2b\n"
            " " LOCK_PREFIX "decb %2\n"
            " jmp 1b\n"
            "3:"
            : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
}

static inline int __byte_spin_trylock(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        u8 old = 1;

        asm("xchgb %1,%0"
            : "+m" (bl->lock), "+q" (old) : : "memory");

        return old == 0;
}

static inline void __byte_spin_unlock(raw_spinlock_t *lock)
{
        struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
        smp_wmb();
        bl->lock = 0;
}

#else  /* !CONFIG_PARAVIRT */
|
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}
#endif /* CONFIG_PARAVIRT */

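/*
 * When CONFIG_PARAVIRT is enabled, the ticket-lock wrappers above are
 * not compiled; <asm/paravirt.h> is expected to supply __raw_spin_*
 * as calls dispatched through pv_lock_ops instead.  A rough sketch of
 * that shape (macro and field names assumed from the paravirt code of
 * this era):
 *
 *	static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 *	}
 *
 * so the native ticket lock (or the byte lock above) is selected by
 * whatever the backend installs in pv_lock_ops.
 */
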
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */

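/*
 * A worked example of the counter layout described above, assuming
 * the conventional RW_LOCK_BIAS value of 0x01000000 from <asm/rwlock.h>:
 *
 *	unlocked	lock == RW_LOCK_BIAS		(0x01000000)
 *	N readers	lock == RW_LOCK_BIAS - N	(still positive)
 *	writer held	lock == 0
 *
 * A reader does "lock subl $1" and succeeds while the result stays
 * non-negative; a writer does "lock subl RW_LOCK_BIAS" and succeeds
 * only if the result is exactly zero.  A negative count therefore
 * means a writer is involved, which is why the sign bit doubles as
 * the "contended" bit.
 */
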
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	/* Take a reader slot; fall to the slow path if a writer is involved. */
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	/* Subtract the full bias; zero means no readers or writers were present. */
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif