Mirror of https://github.com/FEX-Emu/linux.git
Commit 048c8bc90e
Add CONFIG_DEBUG_PREEMPT support to ppc64: it was useful for testing get_paca() preemption.

Cheat a little, just use debug_smp_processor_id() in the debug version of get_paca(): it contains all the right checks and reporting, though get_paca() doesn't really use smp_processor_id(). Use local_paca for what might have been called __raw_get_paca().

Silence harmless warnings from io.h and lparcfg.c with local_paca - it is okay for iseries_lparcfg_data to be referencing shared_proc with preemption enabled: all cpus should show the same value for shared_proc.

Why do other architectures need TRACE_IRQFLAGS_SUPPORT for DEBUG_PREEMPT? I don't know, ppc64 appears to get along fine without it.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
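A minimal sketch of the get_paca() change described above, assuming the usual ppc64 arrangement where local_paca is the r13-based paca pointer; this is illustrative only, not the verbatim patch:

#ifdef CONFIG_DEBUG_PREEMPT
/* Debug flavour: call debug_smp_processor_id() purely for its preemption
 * checks and reporting; the returned cpu id itself is discarded. */
#define get_paca()	((void) debug_smp_processor_id(), local_paca)
#else
#define get_paca()	local_paca
#endif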
67 lines · 2.0 KiB · C
#ifndef _ASM_POWERPC_PERCPU_H_
#define _ASM_POWERPC_PERCPU_H_
#ifdef __powerpc64__
#include <linux/compiler.h>

/*
 * Same as asm-generic/percpu.h, except that we store the per cpu offset
 * in the paca. Based on the x86-64 implementation.
 */

#ifdef CONFIG_SMP

#include <asm/paca.h>

#define __per_cpu_offset(cpu) (paca[cpu].data_offset)
#define __my_cpu_offset() get_paca()->data_offset
#define per_cpu_offset(x) (__per_cpu_offset(x))

/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name) \
    __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
    __attribute__((__section__(".data.percpu.shared_aligned"))) \
    __typeof__(type) per_cpu__##name \
    ____cacheline_aligned_in_smp

/* var is in discarded region: offset to particular copy we want */
#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, local_paca->data_offset))

/* A macro to avoid #include hell... */
#define percpu_modcopy(pcpudst, src, size)			\
do {								\
	unsigned int __i;					\
	for_each_possible_cpu(__i)				\
		memcpy((pcpudst)+__per_cpu_offset(__i),		\
		       (src), (size));				\
} while (0)

extern void setup_per_cpu_areas(void);

#else /* ! SMP */

#define DEFINE_PER_CPU(type, name) \
    __typeof__(type) per_cpu__##name
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
	DEFINE_PER_CPU(type, name)

#define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var)			per_cpu__##var
#define __raw_get_cpu_var(var)			per_cpu__##var

#endif	/* SMP */

#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)

#else
#include <asm-generic/percpu.h>
#endif

#endif /* _ASM_POWERPC_PERCPU_H_ */
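The comment near the top of the header notes that the per-cpu offset is kept in the paca rather than derived from smp_processor_id(). As a quick illustration of how these macros are consumed, here is a hypothetical user of this API (widget_count and the helper functions are made-up names, not part of this header):

/* Each CPU gets its own copy of the counter in .data.percpu;
 * __get_cpu_var() resolves to this CPU's copy via the paca offset,
 * per_cpu() to a named CPU's copy. */
DEFINE_PER_CPU(unsigned long, widget_count);

static inline void count_widget_here(void)
{
	__get_cpu_var(widget_count)++;		/* this CPU's copy */
}

static inline unsigned long widgets_counted_on(int cpu)
{
	return per_cpu(widget_count, cpu);	/* that CPU's copy */
}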