a53c8fab3f
Remove the file name from the comment at top of many files. In most cases the file name was wrong anyway, so it's rather pointless. Also unify the IBM copyright statement. We did have a lot of slightly different statements and wanted to change them one after another whenever a file gets touched. However that never happened. Instead people started to take the old/"wrong" statements as a template for new files. So unify all of them in one go. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
219 lines
4.5 KiB
C
/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

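/*
 * Number of times a lock acquisition is retried in a tight loop before
 * the waiter yields the CPU.  Tunable via the "spin_retry=" kernel
 * command line parameter (see spin_retry_setup() below).
 */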
int spin_retry = 1000;

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

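/*
 * Slow path of arch_spin_lock().  The lock word (owner_cpu) holds the
 * bitwise complement of the owning CPU number, so that 0 can mean
 * "unlocked" even for CPU 0.  As long as the owner's virtual CPU looks
 * runnable (or the owner is unknown), busy-wait for up to spin_retry
 * attempts; otherwise yield to the owning CPU so the hypervisor can
 * schedule it.  On LPAR there is no hypervisor to yield to, so the
 * loop just keeps spinning.
 */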
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	while (1) {
		owner = lp->owner_cpu;
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
			}
			if (MACHINE_IS_LPAR)
				continue;
		}
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

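/*
 * Same as arch_spin_lock_wait(), but for the _irqsave variants of the
 * spinlock API: the caller disabled interrupts, so they are re-enabled
 * (via the saved flags) while waiting and disabled again right before
 * each acquisition attempt.
 */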
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	local_irq_restore(flags);
	while (1) {
		owner = lp->owner_cpu;
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				local_irq_disable();
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
				local_irq_restore(flags);
			}
			if (MACHINE_IS_LPAR)
				continue;
		}
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

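/*
 * Retry a failed trylock up to spin_retry times without yielding.
 * Returns 1 if the lock was acquired, 0 otherwise.
 */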
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = ~smp_processor_id();
	int count;

	for (count = spin_retry; count > 0; count--) {
		if (arch_spin_is_locked(lp))
			continue;
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

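/*
 * Called while busy-waiting on a contended lock.  When running
 * virtualized (z/VM or KVM), or when the owner's virtual CPU is not
 * currently scheduled, give the time slice to the lock owner instead
 * of burning it.
 */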
void arch_spin_relax(arch_spinlock_t *lock)
{
	unsigned int cpu = lock->owner_cpu;
	if (cpu != 0) {
		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
		    !smp_vcpu_scheduled(~cpu))
			smp_yield_cpu(~cpu);
	}
}
EXPORT_SYMBOL(arch_spin_relax);

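/*
 * rwlock slow paths.  The rwlock word uses bit 31 for the writer and
 * the lower 31 bits as the reader count: a reader increments the count
 * with compare-and-swap as long as no writer holds the lock, a writer
 * swaps in 0x80000000 once the word is completely free.
 */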
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

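/*
 * Interrupt-saving variant of _raw_read_lock_wait(): interrupts are
 * re-enabled (via the caller's saved flags) while waiting and disabled
 * again before the compare-and-swap that takes the lock.
 */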
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);

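/*
 * Retry a failed read_trylock up to spin_retry times without yielding.
 * Returns 1 on success, 0 otherwise.
 */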
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

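/*
 * Writer slow path: wait until the lock word is completely free (no
 * readers, no writer), then set the write bit with compare-and-swap.
 */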
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

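/*
 * Interrupt-saving variant of _raw_write_lock_wait(); see the read
 * side above for the interrupt handling.
 */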
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);

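/*
 * Retry a failed write_trylock up to spin_retry times without yielding.
 * Returns 1 on success, 0 otherwise.
 */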
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (count-- > 0) {
		if (!arch_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);