[PATCH] for_each_possible_cpu: sparc64

for_each_cpu() actually iterates across all possible CPUs.  We've had mistakes
in the past where people were using for_each_cpu() where they should have been
iterating across only online or present CPUs.  This is inefficient and
possibly buggy.

We're renaming for_each_cpu() to for_each_possible_cpu() to avoid this in the
future.

This patch replaces for_each_cpu with for_each_possible_cpu for sparc64.
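
For illustration only (this sketch is not part of the patch; the mydrv_* names and the per-CPU variable are invented here), the distinction looks like this in driver code: state that a later-hotplugged CPU must already find set up is walked with for_each_possible_cpu(), while work aimed only at CPUs that are currently up uses for_each_online_cpu():

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU counter, used only for this example. */
static DEFINE_PER_CPU(unsigned long, mydrv_counter);

/* Set up a slot for every CPU that could ever come online, so a CPU
 * hotplugged later finds its state already initialised. */
static void __init mydrv_init_counters(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(mydrv_counter, cpu) = 0;
}

/* Touch only the CPUs that are actually running right now. */
static void mydrv_bump_online(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		per_cpu(mydrv_counter, cpu)++;
}

Before this rename, the first loop would have been spelled for_each_cpu(cpu); the new name only makes its all-possible-CPUs semantics explicit at the call site.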

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit a283a52520 (parent fff8efe7b7)
Author:    KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Date:      2006-04-10 22:52:52 -07:00
Committer: Linus Torvalds
4 files changed, 6 insertions(+), 6 deletions(-)

--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -1092,7 +1092,7 @@ void sun4v_pci_init(int node, char *model_name)
 		}
 	}
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		unsigned long page = get_zeroed_page(GFP_ATOMIC);
 
 		if (!page)

--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -535,7 +535,7 @@ static int __init topology_init(void)
 	while (!cpu_find_by_instance(ncpus_probed, NULL, NULL))
 		ncpus_probed++;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
 		if (p) {
 			register_cpu(p, i, NULL);

--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1280,7 +1280,7 @@ int setup_profiling_timer(unsigned int multiplier)
 		return -EINVAL;
 
 	spin_lock_irqsave(&prof_setup_lock, flags);
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		prof_multiplier(i) = multiplier;
 	current_tick_offset = (timer_tick_offset / multiplier);
 	spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -1308,12 +1308,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		}
 	}
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		if (tlb_type == hypervisor) {
 			int j;
 
 			/* XXX get this mapping from machine description */
-			for_each_cpu(j) {
+			for_each_possible_cpu(j) {
 				if ((j >> 2) == (i >> 2))
 					cpu_set(j, cpu_sibling_map[i]);
 			}

--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -26,7 +26,7 @@ register unsigned long __local_per_cpu_offset asm("g5");
 #define percpu_modcopy(pcpudst, src, size)		\
 do {							\
 	unsigned int __i;				\
-	for_each_cpu(__i)				\
+	for_each_possible_cpu(__i)			\
 		memcpy((pcpudst)+__per_cpu_offset(__i),	\
 		       (src), (size));			\
 } while (0)