#ifndef _ASM_GENERIC_PERCPU_H_
#define _ASM_GENERIC_PERCPU_H_

#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/percpu-defs.h>

#ifdef CONFIG_SMP

/*
 * per_cpu_offset() is the offset that has to be added to a
 * percpu variable to get to the instance for a certain processor.
 *
 * Most arches use the __per_cpu_offset array for those offsets but
 * some arches have their own ways of determining the offset (x86_64, s390).
 */
#ifndef __per_cpu_offset
extern unsigned long __per_cpu_offset[NR_CPUS];

#define per_cpu_offset(x) (__per_cpu_offset[x])
#endif

/*
 * Determine the offset for the currently active processor.
 * An arch may define __my_cpu_offset to provide a more efficient
 * means of obtaining the offset to the per cpu variables of the
 * current processor.
 */
#ifndef __my_cpu_offset
#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
#endif
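
/*
 * As an illustration (not part of this file): an arch override of
 * __my_cpu_offset typically reads the offset from a register or a
 * segment-addressed per cpu variable.  x86 of this era, for example, does
 * roughly
 *
 *	#define __my_cpu_offset		percpu_read(this_cpu_off)
 *
 * which becomes a single segment-prefixed load.  The my_cpu_offset flavour
 * below additionally runs the smp_processor_id() debug checks when
 * CONFIG_DEBUG_PREEMPT is set, catching uses from preemptible context.
 */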
#ifdef CONFIG_DEBUG_PREEMPT
#define my_cpu_offset per_cpu_offset(smp_processor_id())
#else
#define my_cpu_offset __my_cpu_offset
#endif

/*
 * Add an offset to a pointer but keep the pointer as is.
 *
 * Only S390 provides its own means of moving the pointer.
 */
#ifndef SHIFT_PERCPU_PTR
#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
#endif
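
/*
 * For reference, a sketch (not the authoritative definition) of the generic
 * RELOC_HIDE() from linux/compiler-gcc.h:
 *
 *	#define RELOC_HIDE(ptr, off)					\
 *	({	unsigned long __ptr;					\
 *		__asm__ ("" : "=r"(__ptr) : "0"(ptr));			\
 *		(typeof(ptr)) (__ptr + (off)); })
 *
 * It adds the offset while hiding the arithmetic from the compiler, so the
 * optimizer cannot draw conclusions from the original (possibly discarded)
 * symbol address.
 */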

/*
 * A percpu variable may point to a discarded region. The following are
 * established ways to produce a usable pointer from the percpu variable
 * offset.
 */
#define per_cpu(var, cpu) \
	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
#define __get_cpu_var(var) \
	(*SHIFT_PERCPU_PTR(&(var), my_cpu_offset))
#define __raw_get_cpu_var(var) \
	(*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
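
/*
 * Worked expansion, assuming the default definitions above:
 *
 *	per_cpu(x, 3)
 *	  -> (*SHIFT_PERCPU_PTR(&(x), per_cpu_offset(3)))
 *	  -> (*RELOC_HIDE(&(x), __per_cpu_offset[3]))
 *
 * i.e. the address of x is shifted by cpu 3's per cpu offset and then
 * dereferenced.  __get_cpu_var()/__raw_get_cpu_var() do the same with the
 * offset of the currently executing processor.
 */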

this_cpu: Introduce this_cpu_ptr() and generic this_cpu_* operations

This patch introduces two things: first this_cpu_ptr and then per cpu
atomic operations.

this_cpu_ptr
------------

A common operation when dealing with cpu data is to get the instance of the
cpu data associated with the currently executing processor. This can be
optimized by

	this_cpu_ptr(xx) = per_cpu_ptr(xx, smp_processor_id())

The problem with per_cpu_ptr(x, smp_processor_id()) is that it requires
an array lookup to find the offset for the cpu. Processors typically
have the offset for the current cpu area in some kind of (arch dependent)
efficiently accessible register or memory location.

We can use that instead of doing the array lookup to speed up the
determination of the address of the percpu variable. This is particularly
significant because these lookups occur in performance critical paths
of the core kernel. this_cpu_ptr() can avoid those memory accesses.

this_cpu_ptr comes in two flavors. The preemption context matters since we
are referring to the currently executing processor. In many cases we must
ensure that the processor does not change while a code segment is executed.

	__this_cpu_ptr	-> Do not check for preemption context
	this_cpu_ptr	-> Check preemption context

The parameter to these operations is a per cpu pointer. This can be the
address of a statically defined per cpu variable (&per_cpu_var(xxx)) or
the address of a per cpu variable allocated with the per cpu allocator.

per cpu atomic operations: this_cpu_*(var, val)
-----------------------------------------------

this_cpu_* operations (like this_cpu_add(struct->y, value)) operate on
arbitrary scalars that are members of structures allocated with the new
per cpu allocator. They can also operate on static per_cpu variables
if they are passed to per_cpu_var() (see the patch that uses this_cpu_*
operations for vm statistics).

These operations are guaranteed to be atomic vs preemption when modifying
the scalar. The calculation of the per cpu offset is also guaranteed to
be atomic at the same time. This means that a this_cpu_* operation can be
safely used to modify a per cpu variable in a context where interrupts are
enabled and preemption is allowed. Many architectures can perform such
a per cpu atomic operation with a single instruction.

Note that the atomicity here is different from regular atomic operations.
Atomicity is only guaranteed for data accessed from the currently executing
processor. Modifications from other processors are still possible. There
must be other guarantees that the per cpu data is not modified from another
processor when using these instructions. The per cpu atomicity is created
by the fact that the processor either executes an instruction or not.
Embedded in the instruction is the relocation of the per cpu address to
the area reserved for the current processor and the RMW action. Therefore
interrupts or preemption cannot occur in the midst of this processing.

Generic fallback functions are used if an arch does not define optimized
this_cpu operations. The functions also come in the two flavors used
for this_cpu_ptr().

The first parameter is a scalar that is a member of a structure allocated
through the per cpu allocator or a per cpu variable (use per_cpu_var(xxx)).
The operations are similar to what percpu_add() and friends do.

	this_cpu_read(scalar)
	this_cpu_write(scalar, value)
	this_cpu_add(scalar, value)
	this_cpu_sub(scalar, value)
	this_cpu_inc(scalar)
	this_cpu_dec(scalar)
	this_cpu_and(scalar, value)
	this_cpu_or(scalar, value)
	this_cpu_xor(scalar, value)

Arch code can override the generic functions and provide optimized atomic
per cpu operations. These atomic operations must provide both the relocation
(x86 does it through a segment override) and the operation on the data in a
single instruction. Otherwise preempt needs to be disabled and there is no
gain from providing arch implementations.

A third variant is provided prefixed by irqsafe_. These variants are safe
against hardware interrupts on the *same* processor (all per cpu atomic
primitives are *always* *only* providing safety for code running on the
*same* processor!). The increment needs to be implemented by the hardware
in such a way that it is a single RMW instruction that is either processed
before or after an interrupt.

cc: David Howells <dhowells@redhat.com>
cc: Ingo Molnar <mingo@elte.hu>
cc: Rusty Russell <rusty@rustcorp.com.au>
cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Tejun Heo <tj@kernel.org>

#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
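
A minimal usage sketch of the interface described above (hypothetical names,
assuming <linux/percpu.h> and the usual kernel headers; not part of this
header file):

#include <linux/percpu.h>

struct sample_stats {
	unsigned long events;
};

/* hypothetical example object, allocated from the per cpu allocator */
static struct sample_stats *sample_stats;

static int sample_setup(void)
{
	sample_stats = alloc_percpu(struct sample_stats);
	return sample_stats ? 0 : -ENOMEM;
}

static void sample_event(void)
{
	/*
	 * Offset calculation and increment form one preempt-safe unit;
	 * no get_cpu()/put_cpu() is needed around the update.
	 */
	this_cpu_inc(sample_stats->events);
}

static unsigned long sample_total(void)
{
	unsigned long sum = 0;
	int cpu;

	/* Other cpus may still be updating their own instances. */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(sample_stats, cpu)->events;
	return sum;
}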

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
#endif

#else /* ! SMP */
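
/*
 * On UP there is only a single instance of each per cpu variable, so the
 * accessors degenerate to plain accesses.  The (void)(cpu) in per_cpu()
 * still evaluates the cpu argument (preserving any side effects and type
 * checking) while discarding its value.
 */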
#define per_cpu(var, cpu)			(*((void)(cpu), &(var)))
#define __get_cpu_var(var)			(var)
#define __raw_get_cpu_var(var)			(var)
#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)

#endif /* SMP */

#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
#define PER_CPU_FIRST_SECTION ""

#endif

#ifndef PER_CPU_ATTRIBUTES
#define PER_CPU_ATTRIBUTES
#endif

#ifndef PER_CPU_DEF_ATTRIBUTES
#define PER_CPU_DEF_ATTRIBUTES
#endif
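
/*
 * A simplified sketch (not the exact linux/percpu-defs.h text) of how the
 * section names and attribute hooks above are consumed when a per cpu
 * variable is defined:
 *
 *	#define DEFINE_PER_CPU_SECTION(type, name, sec)			\
 *		__attribute__((section(PER_CPU_BASE_SECTION sec)))	\
 *		PER_CPU_ATTRIBUTES PER_CPU_DEF_ATTRIBUTES		\
 *		__typeof__(type) name
 *
 * DEFINE_PER_CPU(type, name) passes an empty section suffix, while the
 * SHARED_ALIGNED/ALIGNED/FIRST variants pass the suffixes defined above.
 */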

#endif /* _ASM_GENERIC_PERCPU_H_ */