arch: convert smp_mb__*()

Convert the deprecated smp_mb__*() barriers to the generic smp_mb__before_atomic() and smp_mb__after_atomic().
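
The conversion is mechanical: every deprecated, operation-specific barrier maps onto one of the two generic ones. A minimal sketch of the pattern (illustrative only; 'flag' and 'count' are hypothetical variables, and the fragment assumes ordinary kernel code with <linux/atomic.h> and <linux/bitops.h> available):

  static unsigned long flag;
  static atomic_t count = ATOMIC_INIT(0);

  /* old style: one barrier name per atomic operation */
  clear_bit(0, &flag);
  smp_mb__after_clear_bit();

  smp_mb__before_atomic_inc();
  atomic_inc(&count);

  /* new style: the same two barriers cover all atomic RMW operations */
  clear_bit(0, &flag);
  smp_mb__after_atomic();

  smp_mb__before_atomic();
  atomic_inc(&count);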

Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Joonwoo Park 2014-07-24 21:10:04 -07:00 committed by Joonwoo Park
parent c5ac12693f
commit 67449d3d3e
4 changed files with 14 additions and 14 deletions


@@ -630,7 +630,7 @@ void smp_send_all_cpu_backtrace(void)
}
clear_bit(0, &backtrace_flag);
-smp_mb__after_clear_bit();
+smp_mb__after_atomic();
}
/*


@@ -571,7 +571,7 @@ static void smp_send_all_cpu_backtrace(void)
}
clear_bit(0, &backtrace_flag);
-smp_mb__after_clear_bit();
+smp_mb__after_atomic();
}
/*


@@ -290,7 +290,7 @@ void dm_table_put(struct dm_table *t)
if (!t)
return;
-smp_mb__before_atomic_dec();
+smp_mb__before_atomic();
atomic_dec(&t->holders);
}
EXPORT_SYMBOL(dm_table_put);


@@ -357,9 +357,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
}
rcu_prepare_for_idle(smp_processor_id());
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-smp_mb__before_atomic_inc(); /* See above. */
+smp_mb__before_atomic(); /* See above. */
atomic_inc(&rdtp->dynticks);
-smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
+smp_mb__after_atomic(); /* Force ordering with next sojourn. */
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
/*
@@ -495,10 +495,10 @@ void rcu_irq_exit(void)
static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
int user)
{
-smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
+smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
atomic_inc(&rdtp->dynticks);
/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-smp_mb__after_atomic_inc(); /* See above. */
+smp_mb__after_atomic(); /* See above. */
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
rcu_cleanup_after_idle(smp_processor_id());
trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
@@ -641,10 +641,10 @@ void rcu_nmi_enter(void)
(atomic_read(&rdtp->dynticks) & 0x1))
return;
rdtp->dynticks_nmi_nesting++;
-smp_mb__before_atomic_inc(); /* Force delay from prior write. */
+smp_mb__before_atomic(); /* Force delay from prior write. */
atomic_inc(&rdtp->dynticks);
/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-smp_mb__after_atomic_inc(); /* See above. */
+smp_mb__after_atomic(); /* See above. */
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
}
@@ -663,9 +663,9 @@ void rcu_nmi_exit(void)
--rdtp->dynticks_nmi_nesting != 0)
return;
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
-smp_mb__before_atomic_inc(); /* See above. */
+smp_mb__before_atomic(); /* See above. */
atomic_inc(&rdtp->dynticks);
-smp_mb__after_atomic_inc(); /* Force delay to next write. */
+smp_mb__after_atomic(); /* Force delay to next write. */
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}
@@ -2659,7 +2659,7 @@ void synchronize_sched_expedited(void)
s = atomic_long_read(&rsp->expedited_done);
if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
/* ensure test happens before caller kfree */
-smp_mb__before_atomic_inc(); /* ^^^ */
+smp_mb__before_atomic(); /* ^^^ */
atomic_long_inc(&rsp->expedited_workdone1);
return;
}
@@ -2677,7 +2677,7 @@ void synchronize_sched_expedited(void)
s = atomic_long_read(&rsp->expedited_done);
if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
/* ensure test happens before caller kfree */
-smp_mb__before_atomic_inc(); /* ^^^ */
+smp_mb__before_atomic(); /* ^^^ */
atomic_long_inc(&rsp->expedited_workdone2);
return;
}
@@ -2706,7 +2706,7 @@ void synchronize_sched_expedited(void)
s = atomic_long_read(&rsp->expedited_done);
if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
/* ensure test happens before caller kfree */
-smp_mb__before_atomic_inc(); /* ^^^ */
+smp_mb__before_atomic(); /* ^^^ */
atomic_long_inc(&rsp->expedited_done_lost);
break;
}