Mirror of https://github.com/joel16/android_kernel_sony_msm8994.git, synced 2024-11-26 21:51:03 +00:00
arch: Mass conversion of smp_mb__*()
Mostly scripted conversion of the smp_mb__* barriers.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-55dhyhocezdw1dg7u19hmh1u@git.kernel.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-arch@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Git-commit: 4e857c58efeb99393cba5a5d0d8ec7117183137c
[joonwoop@codeaurora.org: fixed trivial merge conflict.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
This commit is contained in:
parent 7d9e69c77f
commit c5ac12693f
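For readers skimming the diff below, the change is a mechanical rename: every operation-specific barrier (smp_mb__before_clear_bit(), smp_mb__after_clear_bit(), smp_mb__before_atomic_inc(), smp_mb__after_atomic_inc(), smp_mb__before_atomic_dec(), smp_mb__after_atomic_dec()) collapses onto the generic pair smp_mb__before_atomic() / smp_mb__after_atomic(). The sketch below is illustrative only; the example_clear_flag_*() helpers are hypothetical and not part of this tree, while the barrier and bitop names are the real kernel APIs touched by the commit.

```c
/* Illustrative sketch only; example_clear_flag_*() are hypothetical helpers. */
#include <linux/atomic.h>
#include <linux/bitops.h>

/* Before this commit: barrier names tied to the specific RMW operation. */
static void example_clear_flag_old(unsigned long *flags)
{
	smp_mb__before_clear_bit();
	clear_bit(0, flags);
	smp_mb__after_clear_bit();	/* order the clear against later accesses */
}

/* After this commit: one generic pair covers clear_bit(), atomic_inc(), atomic_dec(), ... */
static void example_clear_flag_new(unsigned long *flags)
{
	smp_mb__before_atomic();
	clear_bit(0, flags);
	smp_mb__after_atomic();
}
```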
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(blk_iopoll_sched);
void __blk_iopoll_complete(struct blk_iopoll *iop)
{
list_del(&iop->list);
-smp_mb__before_clear_bit();
+smp_mb__before_atomic();
clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(__blk_iopoll_complete);
@@ -164,7 +164,7 @@ EXPORT_SYMBOL(blk_iopoll_disable);
void blk_iopoll_enable(struct blk_iopoll *iop)
{
BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
-smp_mb__before_clear_bit();
+smp_mb__before_atomic();
clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_enable);
@@ -126,7 +126,7 @@ static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
int err = ctx->err;

if (!ctx->queue.qlen) {
-smp_mb__before_clear_bit();
+smp_mb__before_atomic();
clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

if (!ctx->queue.qlen ||
@@ -106,7 +106,7 @@ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
atomic_inc(&genpd->sd_count);
-smp_mb__after_atomic_inc();
+smp_mb__after_atomic();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
@@ -240,9 +240,9 @@ static int get_slot(struct mtip_port *port)
*/
static inline void release_slot(struct mtip_port *port, int tag)
{
-smp_mb__before_clear_bit();
+smp_mb__before_atomic();
clear_bit(tag, port->allocated);
-smp_mb__after_clear_bit();
+smp_mb__after_atomic();
}

/*
@@ -159,7 +159,7 @@ void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
int n = dev->coupled->online_count;

-smp_mb__before_atomic_inc();
+smp_mb__before_atomic();
atomic_inc(a);

while (atomic_read(a) < n)
@@ -3504,7 +3504,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
}

clear_bit_unlock(0, &ctx->flushing_completions);
-smp_mb__after_clear_bit();
+smp_mb__after_atomic();
}

tasklet_enable(&ctx->context.tasklet);
@@ -158,7 +158,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
*/
if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
atomic_inc(&dev->_vblank_count[crtc]);
-smp_mb__after_atomic_inc();
+smp_mb__after_atomic();
}

/* Invalidate all timestamps while vblank irq's are off. */
@@ -936,7 +936,7 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)

smp_mb__before_atomic_inc();
atomic_add(diff, &dev->_vblank_count[crtc]);
-smp_mb__after_atomic_inc();
+smp_mb__after_atomic();
}

/**
@@ -1402,7 +1402,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
*/
smp_mb__before_atomic_inc();
atomic_inc(&dev->_vblank_count[crtc]);
-smp_mb__after_atomic_inc();
+smp_mb__after_atomic();
} else {
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
crtc, (int) diff_ns);
@@ -1094,7 +1094,7 @@ static void i915_error_work_func(struct work_struct *work)
* updates before
* the counter increment.
*/
-smp_mb__before_atomic_inc();
+smp_mb__before_atomic();
atomic_inc(&dev_priv->gpu_error.reset_counter);

kobject_uevent_env(&dev->primary->kdev.kobj,
@@ -1141,7 +1141,7 @@ static inline bool cached_dev_get(struct cached_dev *dc)
return false;

/* Paired with the mb in cached_dev_attach */
-smp_mb__after_atomic_inc();
+smp_mb__after_atomic();
return true;
}

@@ -627,7 +627,7 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
cl->fn = fn;
cl->wq = wq;
/* between atomic_dec() in closure_put() */
-smp_mb__before_atomic_dec();
+smp_mb__before_atomic();
}

#define continue_at(_cl, _fn, _wq) \
@@ -604,9 +604,9 @@ static void write_endio(struct bio *bio, int error)

BUG_ON(!test_bit(B_WRITING, &b->state));

-smp_mb__before_clear_bit();
+smp_mb__before_atomic();
clear_bit(B_WRITING, &b->state);
-smp_mb__after_clear_bit();
+smp_mb__after_atomic();

wake_up_bit(&b->state, B_WRITING);
}
@@ -973,9 +973,9 @@ static void read_endio(struct bio *bio, int error)

BUG_ON(!test_bit(B_READING, &b->state));

-smp_mb__before_clear_bit();
+smp_mb__before_atomic();
clear_bit(B_READING, &b->state);
-smp_mb__after_clear_bit();
+smp_mb__after_atomic();

wake_up_bit(&b->state, B_READING);
}
@@ -642,7 +642,7 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
struct dm_snapshot *s = pe->snap;

mempool_free(pe, s->pending_pool);
-smp_mb__before_atomic_dec();
+smp_mb__before_atomic();
atomic_dec(&s->pending_exceptions_count);
}

@@ -783,7 +783,7 @@ static int init_hash_tables(struct dm_snapshot *s)
static void merge_shutdown(struct dm_snapshot *s)
{
clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
-smp_mb__after_clear_bit();
+smp_mb__after_atomic();
wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

@@ -2443,7 +2443,7 @@ static void dm_wq_work(struct work_struct *work)
static void dm_queue_flush(struct mapped_device *md)
{
clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
-smp_mb__after_clear_bit();
+smp_mb__after_atomic();
queue_work(md->wq, &md->work);
}

@@ -4146,7 +4146,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
* STRIPE_ON_UNPLUG_LIST clear but the stripe
* is still in our list
*/
-smp_mb__before_clear_bit();
+smp_mb__before_atomic();
clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
__release_stripe(conf, sh);
cnt++;
@@ -399,7 +399,7 @@ static int dvb_usb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)

/* clear 'streaming' status bit */
clear_bit(ADAP_STREAMING, &adap->state_bits);
-smp_mb__after_clear_bit();
+smp_mb__after_atomic();
wake_up_bit(&adap->state_bits, ADAP_STREAMING);
skip_feed_stop:

@@ -550,7 +550,7 @@ static int dvb_usb_fe_init(struct dvb_frontend *fe)
err:
if (!adap->suspend_resume_active) {
clear_bit(ADAP_INIT, &adap->state_bits);
-smp_mb__after_clear_bit();
+smp_mb__after_atomic();
wake_up_bit(&adap->state_bits, ADAP_INIT);
}

@@ -591,7 +591,7 @@ err:
if (!adap->suspend_resume_active) {
adap->active_fe = -1;
clear_bit(ADAP_SLEEP, &adap->state_bits);
-smp_mb__after_clear_bit();
+smp_mb__after_atomic();
wake_up_bit(&adap->state_bits, ADAP_SLEEP);
}

@ -2708,7 +2708,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
|
||||
|
||||
case LOAD_OPEN:
|
||||
netif_tx_start_all_queues(bp->dev);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
break;
|
||||
|
||||
case LOAD_DIAG:
|
||||
|
@ -1802,10 +1802,10 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
|
||||
/* SRIOV: reschedule any 'in_progress' operations */
|
||||
bnx2x_iov_sp_event(bp, cid, true);
|
||||
|
||||
smp_mb__before_atomic_inc();
|
||||
smp_mb__before_atomic();
|
||||
atomic_inc(&bp->cq_spq_left);
|
||||
/* push the change in bp->spq_left and towards the memory */
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
|
||||
|
||||
@ -1820,11 +1820,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
|
||||
* sp_state is cleared, and this order prevents
|
||||
* races
|
||||
*/
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
|
||||
wmb();
|
||||
clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
/* schedule the sp task as mcp ack is required */
|
||||
bnx2x_schedule_sp_task(bp);
|
||||
@ -4967,9 +4967,9 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
|
||||
__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
|
||||
|
||||
/* mark latest Q bit */
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
/* send Q update ramrod for FCoE Q */
|
||||
rc = bnx2x_queue_state_change(bp, &queue_params);
|
||||
@ -5200,7 +5200,7 @@ next_spqe:
|
||||
spqe_cnt++;
|
||||
} /* for */
|
||||
|
||||
smp_mb__before_atomic_inc();
|
||||
smp_mb__before_atomic();
|
||||
atomic_add(spqe_cnt, &bp->eq_spq_left);
|
||||
|
||||
bp->eq_cons = sw_cons;
|
||||
@ -13298,9 +13298,9 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
|
||||
case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
|
||||
int count = ctl->data.credit.credit_count;
|
||||
|
||||
smp_mb__before_atomic_inc();
|
||||
smp_mb__before_atomic();
|
||||
atomic_add(count, &bp->cq_spq_left);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
break;
|
||||
}
|
||||
case DRV_CTL_ULP_REGISTER_CMD: {
|
||||
|
@ -282,16 +282,16 @@ static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
|
||||
|
||||
static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
|
||||
{
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(o->state, o->pstate);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
|
||||
{
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
set_bit(o->state, o->pstate);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2184,7 +2184,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
|
||||
|
||||
/* The operation is completed */
|
||||
clear_bit(p->state, p->pstate);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -3643,16 +3643,16 @@ error_exit1:
|
||||
|
||||
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
|
||||
{
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(o->sched_state, o->raw.pstate);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
|
||||
{
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
set_bit(o->sched_state, o->raw.pstate);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
|
||||
@ -4272,7 +4272,7 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
|
||||
if (rc) {
|
||||
o->next_state = BNX2X_Q_STATE_MAX;
|
||||
clear_bit(pending_bit, pending);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -4361,7 +4361,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
|
||||
wmb();
|
||||
|
||||
clear_bit(cmd, &o->pending);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -5309,7 +5309,7 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
|
||||
wmb();
|
||||
|
||||
clear_bit(cmd, &o->pending);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -5947,7 +5947,7 @@ int bnx2x_func_state_change(struct bnx2x *bp,
|
||||
if (rc) {
|
||||
o->next_state = BNX2X_F_STATE_MAX;
|
||||
clear_bit(cmd, pending);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -960,10 +960,10 @@ op_err:
|
||||
op_done:
|
||||
case BNX2X_VFOP_QSETUP_DONE:
|
||||
vf->cfg_flags |= VF_CFG_VLAN;
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
|
||||
&bp->sp_rtnl_state);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
schedule_delayed_work(&bp->sp_rtnl_task, 0);
|
||||
bnx2x_vfop_end(bp, vf, vfop);
|
||||
return;
|
||||
@ -2348,9 +2348,9 @@ static
|
||||
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
|
||||
struct bnx2x_virtf *vf)
|
||||
{
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
|
||||
@ -3459,9 +3459,9 @@ int bnx2x_open_epilog(struct bnx2x *bp)
|
||||
* was set before PF driver was loaded.
|
||||
*/
|
||||
if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
schedule_delayed_work(&bp->sp_rtnl_task, 0);
|
||||
}
|
||||
|
||||
|
@ -436,7 +436,7 @@ static int cnic_offld_prep(struct cnic_sock *csk)
|
||||
static int cnic_close_prep(struct cnic_sock *csk)
|
||||
{
|
||||
clear_bit(SK_F_CONNECT_START, &csk->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
|
||||
while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
|
||||
@ -450,7 +450,7 @@ static int cnic_close_prep(struct cnic_sock *csk)
|
||||
static int cnic_abort_prep(struct cnic_sock *csk)
|
||||
{
|
||||
clear_bit(SK_F_CONNECT_START, &csk->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
|
||||
msleep(1);
|
||||
@ -3635,7 +3635,7 @@ static int cnic_cm_destroy(struct cnic_sock *csk)
|
||||
|
||||
csk_hold(csk);
|
||||
clear_bit(SK_F_INUSE, &csk->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
while (atomic_read(&csk->ref_count) != 1)
|
||||
msleep(1);
|
||||
cnic_cm_cleanup(csk);
|
||||
@ -4015,7 +4015,7 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
|
||||
L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
|
||||
set_bit(SK_F_HW_ERR, &csk->flags);
|
||||
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
|
||||
cnic_cm_upcall(cp, csk, opcode);
|
||||
break;
|
||||
|
@ -248,7 +248,7 @@ bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
|
||||
if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
|
||||
bna_ib_ack(tcb->i_dbell, sent);
|
||||
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
|
||||
|
||||
return sent;
|
||||
@ -1024,7 +1024,7 @@ bnad_tx_cleanup(struct delayed_work *work)
|
||||
|
||||
bnad_txq_cleanup(bnad, tcb);
|
||||
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
|
||||
}
|
||||
|
||||
@ -2813,7 +2813,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||
sent = bnad_txcmpl_process(bnad, tcb);
|
||||
if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
|
||||
bna_ib_ack(tcb->i_dbell, sent);
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
|
||||
} else {
|
||||
netif_stop_queue(netdev);
|
||||
|
@ -283,7 +283,7 @@ static int cxgb_close(struct net_device *dev)
|
||||
if (adapter->params.stats_update_period &&
|
||||
!(adapter->open_device_map & PORT_MASK)) {
|
||||
/* Stop statistics accumulation. */
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
spin_lock(&adapter->work_lock); /* sync with update task */
|
||||
spin_unlock(&adapter->work_lock);
|
||||
cancel_mac_stats_update(adapter);
|
||||
|
@ -1379,7 +1379,7 @@ static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
|
||||
struct sge_qset *qs = txq_to_qset(q, qid);
|
||||
|
||||
set_bit(qid, &qs->txq_stopped);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
if (should_restart_tx(q) &&
|
||||
test_and_clear_bit(qid, &qs->txq_stopped))
|
||||
@ -1492,7 +1492,7 @@ static void restart_ctrlq(unsigned long data)
|
||||
|
||||
if (!skb_queue_empty(&q->sendq)) {
|
||||
set_bit(TXQ_CTRL, &qs->txq_stopped);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
if (should_restart_tx(q) &&
|
||||
test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
|
||||
@ -1698,7 +1698,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
|
||||
|
||||
if (unlikely(q->size - q->in_use < ndesc)) {
|
||||
set_bit(TXQ_OFLD, &qs->txq_stopped);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
if (should_restart_tx(q) &&
|
||||
test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
|
||||
|
@ -2001,7 +2001,7 @@ static void sge_rx_timer_cb(unsigned long data)
|
||||
struct sge_fl *fl = s->egr_map[id];
|
||||
|
||||
clear_bit(id, s->starving_fl);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
if (fl_starving(fl)) {
|
||||
rxq = container_of(fl, struct sge_eth_rxq, fl);
|
||||
|
@ -1950,7 +1950,7 @@ static void sge_rx_timer_cb(unsigned long data)
|
||||
struct sge_fl *fl = s->egr_map[id];
|
||||
|
||||
clear_bit(id, s->starving_fl);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
/*
|
||||
* Since we are accessing fl without a lock there's a
|
||||
|
@ -207,7 +207,7 @@ static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
|
||||
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
|
||||
|
||||
/* flush memory to make sure state is correct before next watchdog */
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
|
||||
}
|
||||
|
||||
|
@ -548,7 +548,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
|
||||
* wl1271_ps_elp_wakeup cannot be called concurrently.
|
||||
*/
|
||||
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
|
||||
if (ret < 0)
|
||||
|
@ -655,9 +655,9 @@ static void pcifront_do_aer(struct work_struct *data)
|
||||
notify_remote_via_evtchn(pdev->evtchn);
|
||||
|
||||
/*in case of we lost an aer request in four lines time_window*/
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(_PDEVB_op_active, &pdev->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
schedule_pcifront_aer_op(pdev);
|
||||
|
||||
|
@ -1541,7 +1541,7 @@ void isci_remote_device_release(struct kref *kref)
|
||||
clear_bit(IDEV_STOP_PENDING, &idev->flags);
|
||||
clear_bit(IDEV_IO_READY, &idev->flags);
|
||||
clear_bit(IDEV_GONE, &idev->flags);
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(IDEV_ALLOCATED, &idev->flags);
|
||||
wake_up(&ihost->eventq);
|
||||
}
|
||||
|
@ -826,7 +826,7 @@ static int tcm_loop_port_link(
|
||||
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
|
||||
|
||||
atomic_inc(&tl_tpg->tl_tpg_port_count);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
/*
|
||||
* Add Linux/SCSI struct scsi_device by HCTL
|
||||
*/
|
||||
@ -861,7 +861,7 @@ static void tcm_loop_port_unlink(
|
||||
scsi_device_put(sd);
|
||||
|
||||
atomic_dec(&tl_tpg->tl_tpg_port_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
|
||||
}
|
||||
|
@ -313,7 +313,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
|
||||
continue;
|
||||
|
||||
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
|
||||
|
||||
@ -324,7 +324,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
|
||||
|
||||
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
|
||||
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
break;
|
||||
}
|
||||
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
|
||||
@ -809,7 +809,7 @@ static int core_alua_do_transition_tg_pt(
|
||||
* TARGET PORT GROUPS command
|
||||
*/
|
||||
atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
|
||||
|
||||
spin_lock_bh(&port->sep_alua_lock);
|
||||
@ -836,7 +836,7 @@ static int core_alua_do_transition_tg_pt(
|
||||
|
||||
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
|
||||
atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
|
||||
/*
|
||||
@ -901,7 +901,7 @@ int core_alua_do_port_transition(
|
||||
spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
|
||||
lu_gp = local_lu_gp_mem->lu_gp;
|
||||
atomic_inc(&lu_gp->lu_gp_ref_cnt);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
|
||||
/*
|
||||
* For storage objects that are members of the 'default_lu_gp',
|
||||
@ -916,7 +916,7 @@ int core_alua_do_port_transition(
|
||||
core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
|
||||
md_buf, new_state, explict);
|
||||
atomic_dec(&lu_gp->lu_gp_ref_cnt);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
kfree(md_buf);
|
||||
return 0;
|
||||
}
|
||||
@ -931,7 +931,7 @@ int core_alua_do_port_transition(
|
||||
|
||||
dev = lu_gp_mem->lu_gp_mem_dev;
|
||||
atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock(&lu_gp->lu_gp_lock);
|
||||
|
||||
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
|
||||
@ -960,7 +960,7 @@ int core_alua_do_port_transition(
|
||||
nacl = NULL;
|
||||
}
|
||||
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
|
||||
/*
|
||||
* core_alua_do_transition_tg_pt() will always return
|
||||
@ -971,13 +971,13 @@ int core_alua_do_port_transition(
|
||||
|
||||
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
|
||||
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
|
||||
|
||||
spin_lock(&lu_gp->lu_gp_lock);
|
||||
atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
spin_unlock(&lu_gp->lu_gp_lock);
|
||||
|
||||
@ -988,7 +988,7 @@ int core_alua_do_port_transition(
|
||||
core_alua_dump_state(new_state));
|
||||
|
||||
atomic_dec(&lu_gp->lu_gp_ref_cnt);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
kfree(md_buf);
|
||||
return 0;
|
||||
}
|
||||
|
@ -223,7 +223,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
|
||||
continue;
|
||||
|
||||
atomic_inc(&deve->pr_ref_count);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock_irq(&nacl->device_list_lock);
|
||||
|
||||
return deve;
|
||||
@ -1280,7 +1280,7 @@ int core_dev_add_initiator_node_lun_acl(
|
||||
spin_lock(&lun->lun_acl_lock);
|
||||
list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
|
||||
atomic_inc(&lun->lun_acl_count);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock(&lun->lun_acl_lock);
|
||||
|
||||
pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
|
||||
@ -1314,7 +1314,7 @@ int core_dev_del_initiator_node_lun_acl(
|
||||
spin_lock(&lun->lun_acl_lock);
|
||||
list_del(&lacl->lacl_list);
|
||||
atomic_dec(&lun->lun_acl_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock(&lun->lun_acl_lock);
|
||||
|
||||
core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
|
||||
|
@ -289,7 +289,7 @@ static void iblock_bio_done(struct bio *bio, int err)
|
||||
* Bump the ib_bio_err_cnt and release bio.
|
||||
*/
|
||||
atomic_inc(&ibr->ib_bio_err_cnt);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
bio_put(bio);
|
||||
|
@ -673,7 +673,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
|
||||
spin_lock(&dev->se_port_lock);
|
||||
list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
|
||||
atomic_inc(&port->sep_tg_pt_ref_cnt);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock(&dev->se_port_lock);
|
||||
|
||||
spin_lock_bh(&port->sep_alua_lock);
|
||||
@ -708,7 +708,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
|
||||
continue;
|
||||
|
||||
atomic_inc(&deve_tmp->pr_ref_count);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock_bh(&port->sep_alua_lock);
|
||||
/*
|
||||
* Grab a configfs group dependency that is released
|
||||
@ -721,9 +721,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
|
||||
pr_err("core_scsi3_lunacl_depend"
|
||||
"_item() failed\n");
|
||||
atomic_dec(&port->sep_tg_pt_ref_cnt);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
atomic_dec(&deve_tmp->pr_ref_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
goto out;
|
||||
}
|
||||
/*
|
||||
@ -738,9 +738,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
|
||||
sa_res_key, all_tg_pt, aptpl);
|
||||
if (!pr_reg_atp) {
|
||||
atomic_dec(&port->sep_tg_pt_ref_cnt);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
atomic_dec(&deve_tmp->pr_ref_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
core_scsi3_lunacl_undepend_item(deve_tmp);
|
||||
goto out;
|
||||
}
|
||||
@ -753,7 +753,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
|
||||
|
||||
spin_lock(&dev->se_port_lock);
|
||||
atomic_dec(&port->sep_tg_pt_ref_cnt);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
spin_unlock(&dev->se_port_lock);
|
||||
|
||||
@ -1113,7 +1113,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
|
||||
continue;
|
||||
}
|
||||
atomic_inc(&pr_reg->pr_res_holders);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock(&pr_tmpl->registration_lock);
|
||||
return pr_reg;
|
||||
}
|
||||
@ -1128,7 +1128,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
|
||||
continue;
|
||||
|
||||
atomic_inc(&pr_reg->pr_res_holders);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock(&pr_tmpl->registration_lock);
|
||||
return pr_reg;
|
||||
}
|
||||
@ -1158,7 +1158,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
|
||||
static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
|
||||
{
|
||||
atomic_dec(&pr_reg->pr_res_holders);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
static int core_scsi3_check_implict_release(
|
||||
@ -1356,7 +1356,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
|
||||
&tpg->tpg_group.cg_item);
|
||||
|
||||
atomic_dec(&tpg->tpg_pr_ref_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
|
||||
@ -1376,7 +1376,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
|
||||
|
||||
if (nacl->dynamic_node_acl) {
|
||||
atomic_dec(&nacl->acl_pr_ref_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1384,7 +1384,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
|
||||
&nacl->acl_group.cg_item);
|
||||
|
||||
atomic_dec(&nacl->acl_pr_ref_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
|
||||
@ -1415,7 +1415,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
|
||||
*/
|
||||
if (!lun_acl) {
|
||||
atomic_dec(&se_deve->pr_ref_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
return;
|
||||
}
|
||||
nacl = lun_acl->se_lun_nacl;
|
||||
@ -1425,7 +1425,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
|
||||
&lun_acl->se_lun_group.cg_item);
|
||||
|
||||
atomic_dec(&se_deve->pr_ref_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
static sense_reason_t
|
||||
@ -1559,14 +1559,14 @@ core_scsi3_decode_spec_i_port(
|
||||
continue;
|
||||
|
||||
atomic_inc(&tmp_tpg->tpg_pr_ref_count);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock(&dev->se_port_lock);
|
||||
|
||||
if (core_scsi3_tpg_depend_item(tmp_tpg)) {
|
||||
pr_err(" core_scsi3_tpg_depend_item()"
|
||||
" for tmp_tpg\n");
|
||||
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||
goto out_unmap;
|
||||
}
|
||||
@ -1580,7 +1580,7 @@ core_scsi3_decode_spec_i_port(
|
||||
tmp_tpg, i_str);
|
||||
if (dest_node_acl) {
|
||||
atomic_inc(&dest_node_acl->acl_pr_ref_count);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
spin_unlock_irq(&tmp_tpg->acl_node_lock);
|
||||
|
||||
@ -1594,7 +1594,7 @@ core_scsi3_decode_spec_i_port(
|
||||
pr_err("configfs_depend_item() failed"
|
||||
" for dest_node_acl->acl_group\n");
|
||||
atomic_dec(&dest_node_acl->acl_pr_ref_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
core_scsi3_tpg_undepend_item(tmp_tpg);
|
||||
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||
goto out_unmap;
|
||||
@ -1654,7 +1654,7 @@ core_scsi3_decode_spec_i_port(
|
||||
pr_err("core_scsi3_lunacl_depend_item()"
|
||||
" failed\n");
|
||||
atomic_dec(&dest_se_deve->pr_ref_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
core_scsi3_nodeacl_undepend_item(dest_node_acl);
|
||||
core_scsi3_tpg_undepend_item(dest_tpg);
|
||||
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||
@ -3308,14 +3308,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
|
||||
continue;
|
||||
|
||||
atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock(&dev->se_port_lock);
|
||||
|
||||
if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
|
||||
pr_err("core_scsi3_tpg_depend_item() failed"
|
||||
" for dest_se_tpg\n");
|
||||
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||
goto out_put_pr_reg;
|
||||
}
|
||||
@ -3413,7 +3413,7 @@ after_iport_check:
|
||||
initiator_str);
|
||||
if (dest_node_acl) {
|
||||
atomic_inc(&dest_node_acl->acl_pr_ref_count);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
spin_unlock_irq(&dest_se_tpg->acl_node_lock);
|
||||
|
||||
@ -3429,7 +3429,7 @@ after_iport_check:
|
||||
pr_err("core_scsi3_nodeacl_depend_item() for"
|
||||
" dest_node_acl\n");
|
||||
atomic_dec(&dest_node_acl->acl_pr_ref_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
dest_node_acl = NULL;
|
||||
ret = TCM_INVALID_PARAMETER_LIST;
|
||||
goto out;
|
||||
@ -3454,7 +3454,7 @@ after_iport_check:
|
||||
if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
|
||||
pr_err("core_scsi3_lunacl_depend_item() failed\n");
|
||||
atomic_dec(&dest_se_deve->pr_ref_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
dest_se_deve = NULL;
|
||||
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||
goto out;
|
||||
@ -4038,7 +4038,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
|
||||
add_desc_len = 0;
|
||||
|
||||
atomic_inc(&pr_reg->pr_res_holders);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock(&pr_tmpl->registration_lock);
|
||||
/*
|
||||
* Determine expected length of $FABRIC_MOD specific
|
||||
@ -4052,7 +4052,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
|
||||
" out of buffer: %d\n", cmd->data_length);
|
||||
spin_lock(&pr_tmpl->registration_lock);
|
||||
atomic_dec(&pr_reg->pr_res_holders);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
break;
|
||||
}
|
||||
/*
|
||||
@ -4114,7 +4114,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
|
||||
|
||||
spin_lock(&pr_tmpl->registration_lock);
|
||||
atomic_dec(&pr_reg->pr_res_holders);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
/*
|
||||
* Set the ADDITIONAL DESCRIPTOR LENGTH
|
||||
*/
|
||||
|
@ -666,7 +666,7 @@ void target_qf_do_work(struct work_struct *work)
|
||||
list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
|
||||
list_del(&cmd->se_qf_node);
|
||||
atomic_dec(&dev->dev_qf_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
|
||||
" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
|
||||
@ -1081,7 +1081,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
|
||||
* Dormant to Active status.
|
||||
*/
|
||||
cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
|
||||
cmd->se_ordered_id, cmd->sam_task_attr,
|
||||
dev->transport->name);
|
||||
@ -1618,7 +1618,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
|
||||
return false;
|
||||
case MSG_ORDERED_TAG:
|
||||
atomic_inc(&dev->dev_ordered_sync);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
|
||||
" se_ordered_id: %u\n",
|
||||
@ -1636,7 +1636,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
|
||||
* For SIMPLE and UNTAGGED Task Attribute commands
|
||||
*/
|
||||
atomic_inc(&dev->simple_cmds);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1741,7 +1741,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
|
||||
|
||||
if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
|
||||
atomic_dec(&dev->simple_cmds);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
dev->dev_cur_ordered_id++;
|
||||
pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
|
||||
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
|
||||
@ -1753,7 +1753,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
|
||||
cmd->se_ordered_id);
|
||||
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
|
||||
atomic_dec(&dev->dev_ordered_sync);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
dev->dev_cur_ordered_id++;
|
||||
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
|
||||
@ -1809,7 +1809,7 @@ static void transport_handle_queue_full(
|
||||
spin_lock_irq(&dev->qf_cmd_lock);
|
||||
list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
|
||||
atomic_inc(&dev->dev_qf_count);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
|
||||
|
||||
schedule_work(&cmd->se_dev->qf_work_queue);
|
||||
@ -2820,7 +2820,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
|
||||
if (cmd->data_direction == DMA_TO_DEVICE) {
|
||||
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
|
||||
cmd->transport_state |= CMD_T_ABORTED;
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
}
|
||||
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
|
||||
|
@ -163,7 +163,7 @@ int core_scsi3_ua_allocate(
|
||||
spin_unlock_irq(&nacl->device_list_lock);
|
||||
|
||||
atomic_inc(&deve->ua_count);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
return 0;
|
||||
}
|
||||
list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
|
||||
@ -176,7 +176,7 @@ int core_scsi3_ua_allocate(
|
||||
asc, ascq);
|
||||
|
||||
atomic_inc(&deve->ua_count);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -191,7 +191,7 @@ void core_scsi3_ua_release_all(
|
||||
kmem_cache_free(se_ua_cache, ua);
|
||||
|
||||
atomic_dec(&deve->ua_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
spin_unlock(&deve->ua_lock);
|
||||
}
|
||||
@ -252,7 +252,7 @@ void core_scsi3_ua_for_check_condition(
|
||||
kmem_cache_free(se_ua_cache, ua);
|
||||
|
||||
atomic_dec(&deve->ua_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
spin_unlock(&deve->ua_lock);
|
||||
spin_unlock_irq(&nacl->device_list_lock);
|
||||
@ -311,7 +311,7 @@ int core_scsi3_ua_clear_for_request_sense(
|
||||
kmem_cache_free(se_ua_cache, ua);
|
||||
|
||||
atomic_dec(&deve->ua_count);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
spin_unlock(&deve->ua_lock);
|
||||
spin_unlock_irq(&nacl->device_list_lock);
|
||||
|
@ -200,7 +200,7 @@ static void dma_tx_callback(void *param)
|
||||
|
||||
/* clear the bit used to serialize the DMA tx. */
|
||||
clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
/* wake up the possible processes. */
|
||||
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
|
||||
@ -275,7 +275,7 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
|
||||
mxs_auart_dma_tx(s, i);
|
||||
} else {
|
||||
clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
@ -424,7 +424,7 @@ static void usb_wwan_outdat_callback(struct urb *urb)
|
||||
|
||||
for (i = 0; i < N_OUT_URB; ++i) {
|
||||
if (portdata->out_urbs[i] == urb) {
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(i, &portdata->out_busy);
|
||||
break;
|
||||
}
|
||||
|
@ -1200,7 +1200,7 @@ static int vhost_scsi_set_endpoint(
|
||||
tv_tpg->tv_tpg_vhost_count++;
|
||||
tv_tpg->vhost_scsi = vs;
|
||||
vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
match = true;
|
||||
}
|
||||
mutex_unlock(&tv_tpg->tv_tpg_mutex);
|
||||
|
@ -131,9 +131,9 @@ void w1_family_get(struct w1_family *f)
|
||||
|
||||
void __w1_family_get(struct w1_family *f)
|
||||
{
|
||||
smp_mb__before_atomic_inc();
|
||||
smp_mb__before_atomic();
|
||||
atomic_inc(&f->refcnt);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(w1_unregister_family);
|
||||
|
@ -345,9 +345,9 @@ void xen_pcibk_do_op(struct work_struct *data)
|
||||
notify_remote_via_irq(pdev->evtchn_irq);
|
||||
|
||||
/* Mark that we're done. */
|
||||
smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
|
||||
smp_mb__before_atomic(); /* /after/ clearing PCIF_active */
|
||||
clear_bit(_PDEVF_op_active, &pdev->flags);
|
||||
smp_mb__after_clear_bit(); /* /before/ final check for work */
|
||||
smp_mb__after_atomic(); /* /before/ final check for work */
|
||||
|
||||
/* Check to see if the driver domain tried to start another request in
|
||||
* between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
|
||||
|
@ -231,7 +231,7 @@ static inline void btrfs_inode_block_unlocked_dio(struct inode *inode)
|
||||
|
||||
static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode)
|
||||
{
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(BTRFS_INODE_READDIO_NEED_LOCK,
|
||||
&BTRFS_I(inode)->runtime_flags);
|
||||
}
|
||||
|
@ -3266,7 +3266,7 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
|
||||
static void end_extent_buffer_writeback(struct extent_buffer *eb)
|
||||
{
|
||||
clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
|
||||
}
|
||||
|
||||
|
@ -7083,7 +7083,7 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
|
||||
* before atomic variable goto zero, we must make sure
|
||||
* dip->errors is perceived to be set.
|
||||
*/
|
||||
smp_mb__before_atomic_dec();
|
||||
smp_mb__before_atomic();
|
||||
}
|
||||
|
||||
/* if there are more bios still pending for this dio, just exit */
|
||||
@ -7260,7 +7260,7 @@ out_err:
|
||||
* before atomic variable goto zero, we must
|
||||
* make sure dip->errors is perceived to be set.
|
||||
*/
|
||||
smp_mb__before_atomic_dec();
|
||||
smp_mb__before_atomic();
|
||||
if (atomic_dec_and_test(&dip->pending_bios))
|
||||
bio_io_error(dip->orig_bio);
|
||||
|
||||
@ -7401,7 +7401,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
|
||||
return 0;
|
||||
|
||||
atomic_inc(&inode->i_dio_count);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
if (rw & WRITE) {
|
||||
count = iov_length(iov, nr_segs);
|
||||
|
@ -77,7 +77,7 @@ EXPORT_SYMBOL(__lock_buffer);
|
||||
void unlock_buffer(struct buffer_head *bh)
|
||||
{
|
||||
clear_bit_unlock(BH_Lock, &bh->b_state);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&bh->b_state, BH_Lock);
|
||||
}
|
||||
EXPORT_SYMBOL(unlock_buffer);
|
||||
|
@ -42,7 +42,7 @@ int ext4_resize_begin(struct super_block *sb)
|
||||
void ext4_resize_end(struct super_block *sb)
|
||||
{
|
||||
clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
|
||||
|
@ -283,7 +283,7 @@ static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holde
|
||||
static void gfs2_holder_wake(struct gfs2_holder *gh)
|
||||
{
|
||||
clear_bit(HIF_WAIT, &gh->gh_iflags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&gh->gh_iflags, HIF_WAIT);
|
||||
}
|
||||
|
||||
@ -416,7 +416,7 @@ static void gfs2_demote_wake(struct gfs2_glock *gl)
|
||||
{
|
||||
gl->gl_demote_state = LM_ST_EXCLUSIVE;
|
||||
clear_bit(GLF_DEMOTE, &gl->gl_flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
|
||||
}
|
||||
|
||||
@ -625,7 +625,7 @@ out:
|
||||
|
||||
out_sched:
|
||||
clear_bit(GLF_LOCK, &gl->gl_flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
gfs2_glock_hold(gl);
|
||||
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
|
||||
gfs2_glock_put_nolock(gl);
|
||||
@ -633,7 +633,7 @@ out_sched:
|
||||
|
||||
out_unlock:
|
||||
clear_bit(GLF_LOCK, &gl->gl_flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -214,7 +214,7 @@ static void inode_go_sync(struct gfs2_glock *gl)
|
||||
* Writeback of the data mapping may cause the dirty flag to be set
|
||||
* so we have to clear it again here.
|
||||
*/
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(GLF_DIRTY, &gl->gl_flags);
|
||||
}
|
||||
|
||||
|
@ -1132,7 +1132,7 @@ static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
|
||||
queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
|
||||
|
||||
clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
|
||||
spin_unlock(&ls->ls_recover_spin);
|
||||
}
|
||||
@ -1269,7 +1269,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
|
||||
|
||||
ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
|
||||
clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
|
||||
return 0;
|
||||
|
||||
|
@ -587,7 +587,7 @@ fail:
|
||||
gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP);
|
||||
done:
|
||||
clear_bit(JDF_RECOVERY, &jd->jd_flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&jd->jd_flags, JDF_RECOVERY);
|
||||
}
|
||||
|
||||
|
@ -332,7 +332,7 @@ static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
|
||||
set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
|
||||
else if (val == 0) {
|
||||
clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
gfs2_glock_thaw(sdp);
|
||||
} else {
|
||||
ret = -EINVAL;
|
||||
@ -481,7 +481,7 @@ static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
|
||||
rv = jid = -EINVAL;
|
||||
sdp->sd_lockstruct.ls_jid = jid;
|
||||
clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
|
||||
out:
|
||||
spin_unlock(&sdp->sd_jindex_spin);
|
||||
|
@ -238,7 +238,7 @@ static int journal_submit_data_buffers(journal_t *journal,
|
||||
spin_lock(&journal->j_list_lock);
|
||||
J_ASSERT(jinode->i_transaction == commit_transaction);
|
||||
clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
|
||||
}
|
||||
spin_unlock(&journal->j_list_lock);
|
||||
@ -276,7 +276,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
|
||||
}
|
||||
spin_lock(&journal->j_list_lock);
|
||||
clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
|
||||
}
|
||||
|
||||
|
fs/nfs/dir.c (12 changed lines)
@ -1942,9 +1942,9 @@ static void nfs_access_free_entry(struct nfs_access_entry *entry)
|
||||
{
|
||||
put_rpccred(entry->cred);
|
||||
kfree(entry);
|
||||
smp_mb__before_atomic_dec();
|
||||
smp_mb__before_atomic();
|
||||
atomic_long_dec(&nfs_access_nr_entries);
|
||||
smp_mb__after_atomic_dec();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
static void nfs_access_free_list(struct list_head *head)
|
||||
@ -1990,9 +1990,9 @@ int nfs_access_cache_shrinker(struct shrinker *shrink,
|
||||
else {
|
||||
remove_lru_entry:
|
||||
list_del_init(&nfsi->access_cache_inode_lru);
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
spin_unlock(&inode->i_lock);
|
||||
}
|
||||
@ -2134,9 +2134,9 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
|
||||
nfs_access_add_rbtree(inode, cache);
|
||||
|
||||
/* Update accounting */
|
||||
smp_mb__before_atomic_inc();
|
||||
smp_mb__before_atomic();
|
||||
atomic_long_inc(&nfs_access_nr_entries);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
/* Add inode to global LRU list */
|
||||
if (!test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
|
||||
|
@ -784,9 +784,9 @@ static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
|
||||
|
||||
static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
|
||||
{
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
|
||||
}
|
||||
|
||||
|
@ -1166,9 +1166,9 @@ static int nfs4_run_state_manager(void *);
|
||||
|
||||
static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
|
||||
{
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
|
||||
rpc_wake_up(&clp->cl_rpcwaitq);
|
||||
}
|
||||
|
@ -95,7 +95,7 @@ nfs_iocounter_dec(struct nfs_io_counter *c)
|
||||
{
|
||||
if (atomic_dec_and_test(&c->io_count)) {
|
||||
clear_bit(NFS_IO_INPROGRESS, &c->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
|
||||
}
|
||||
}
|
||||
@ -193,9 +193,9 @@ void nfs_unlock_request(struct nfs_page *req)
|
||||
printk(KERN_ERR "NFS: Invalid unlock attempted\n");
|
||||
BUG();
|
||||
}
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(PG_BUSY, &req->wb_flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&req->wb_flags, PG_BUSY);
|
||||
}
|
||||
|
||||
|
@ -1811,7 +1811,7 @@ static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *lis
|
||||
}
|
||||
|
||||
clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
|
||||
}
|
||||
|
||||
|
@ -273,7 +273,7 @@ pnfs_get_lseg(struct pnfs_layout_segment *lseg)
|
||||
{
|
||||
if (lseg) {
|
||||
atomic_inc(&lseg->pls_refcount);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
return lseg;
|
||||
}
|
||||
|
@ -403,7 +403,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
|
||||
nfs_pageio_complete(&pgio);
|
||||
|
||||
clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(bitlock, NFS_INO_FLUSHING);
|
||||
|
||||
if (err < 0)
|
||||
@ -1406,7 +1406,7 @@ static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
|
||||
static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
|
||||
{
|
||||
clear_bit(NFS_INO_COMMIT, &nfsi->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
|
||||
}
|
||||
|
||||
|
@ -460,9 +460,9 @@ static int write_cnodes(struct ubifs_info *c)
|
||||
* important.
|
||||
*/
|
||||
clear_bit(DIRTY_CNODE, &cnode->flags);
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(COW_CNODE, &cnode->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
offs += len;
|
||||
dbg_chk_lpt_sz(c, 1, len);
|
||||
cnode = cnode->cnext;
|
||||
|
@ -895,9 +895,9 @@ static int write_index(struct ubifs_info *c)
|
||||
* the reason for the second barrier.
|
||||
*/
|
||||
clear_bit(DIRTY_ZNODE, &znode->flags);
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(COW_ZNODE, &znode->flags);
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
/*
|
||||
* We have marked the znode as clean but have not updated the
|
||||
|
@@ -80,7 +80,7 @@ static inline void set_bit(int nr, volatile unsigned long *addr)
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
-* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+* you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
static inline void clear_bit(int nr, volatile unsigned long *addr)
@@ -20,7 +20,7 @@
*/
#define clear_bit_unlock(nr, addr) \
do { \
-smp_mb__before_clear_bit(); \
+smp_mb__before_atomic(); \
clear_bit(nr, addr); \
} while (0)

@ -272,7 +272,7 @@ static inline void get_bh(struct buffer_head *bh)
|
||||
|
||||
static inline void put_bh(struct buffer_head *bh)
|
||||
{
|
||||
smp_mb__before_atomic_dec();
|
||||
smp_mb__before_atomic();
|
||||
atomic_dec(&bh->b_count);
|
||||
}
|
||||
|
||||
|
@ -649,7 +649,7 @@ static inline void hd_ref_init(struct hd_struct *part)
|
||||
static inline void hd_struct_get(struct hd_struct *part)
|
||||
{
|
||||
atomic_inc(&part->ref);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
static inline int hd_struct_try_get(struct hd_struct *part)
|
||||
|
@ -459,7 +459,7 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
|
||||
|
||||
static inline void tasklet_unlock(struct tasklet_struct *t)
|
||||
{
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(TASKLET_STATE_RUN, &(t)->state);
|
||||
}
|
||||
|
||||
@ -507,7 +507,7 @@ static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
|
||||
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
|
||||
{
|
||||
atomic_inc(&t->count);
|
||||
smp_mb__after_atomic_inc();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
static inline void tasklet_disable(struct tasklet_struct *t)
|
||||
@ -519,13 +519,13 @@ static inline void tasklet_disable(struct tasklet_struct *t)
|
||||
|
||||
static inline void tasklet_enable(struct tasklet_struct *t)
|
||||
{
|
||||
smp_mb__before_atomic_dec();
|
||||
smp_mb__before_atomic();
|
||||
atomic_dec(&t->count);
|
||||
}
|
||||
|
||||
static inline void tasklet_hi_enable(struct tasklet_struct *t)
|
||||
{
|
||||
smp_mb__before_atomic_dec();
|
||||
smp_mb__before_atomic();
|
||||
atomic_dec(&t->count);
|
||||
}
|
||||
|
||||
|
@ -470,7 +470,7 @@ static inline void napi_disable(struct napi_struct *n)
|
||||
static inline void napi_enable(struct napi_struct *n)
|
||||
{
|
||||
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
|
||||
smp_mb__before_clear_bit();
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(NAPI_STATE_SCHED, &n->state);
|
||||
}
|
||||
|
||||
|
@@ -2666,10 +2666,8 @@ static inline bool __must_check current_set_polling_and_test(void)
/*
* Polling state must be visible before we test NEED_RESCHED,
* paired by resched_task()
-*
-* XXX: assumes set/clear bit are identical barrier wise.
*/
-smp_mb__after_clear_bit();
+smp_mb__after_atomic();

return unlikely(tif_need_resched());
}
@ -2687,7 +2685,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
|
||||
* Polling state must be visible before we test NEED_RESCHED,
|
||||
* paired by resched_task()
|
||||
*/
|
||||
smp_mb__after_clear_bit();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
return unlikely(tif_need_resched());
|
||||
}
|
||||
|
@ -150,18 +150,18 @@ struct rpc_task_setup {
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_clear_running(t) \
do { \
smp_mb__before_clear_bit(); \
smp_mb__before_atomic(); \
clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
smp_mb__after_clear_bit(); \
smp_mb__after_atomic(); \
} while (0)

#define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_clear_queued(t) \
do { \
smp_mb__before_clear_bit(); \
smp_mb__before_atomic(); \
clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
smp_mb__after_clear_bit(); \
smp_mb__after_atomic(); \
} while (0)

#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)

@ -368,9 +368,9 @@ static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
{
smp_mb__before_clear_bit();
smp_mb__before_atomic();
clear_bit(XPRT_CONNECTING, &xprt->state);
smp_mb__after_clear_bit();
smp_mb__after_atomic();
}

static inline int xprt_connecting(struct rpc_xprt *xprt)

@ -400,9 +400,9 @@ static inline void xprt_clear_bound(struct rpc_xprt *xprt)
static inline void xprt_clear_binding(struct rpc_xprt *xprt)
{
smp_mb__before_clear_bit();
smp_mb__before_atomic();
clear_bit(XPRT_BINDING, &xprt->state);
smp_mb__after_clear_bit();
smp_mb__after_atomic();
}

static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)

@ -191,7 +191,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
* pairs with task_work_add()->set_notify_resume() after
* hlist_add_head(task->task_works);
*/
smp_mb__after_clear_bit();
smp_mb__after_atomic();
if (unlikely(current->task_works))
task_work_run();
}

@ -1195,7 +1195,7 @@ static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
/* put back the conn without restarting its timer */
static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
{
smp_mb__before_atomic_dec();
smp_mb__before_atomic();
atomic_dec(&cp->refcnt);
}
extern void ip_vs_conn_put(struct ip_vs_conn *cp);

@ -1403,7 +1403,7 @@ static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
{
smp_mb__before_atomic_dec();
smp_mb__before_atomic();
atomic_dec(&dest->refcnt);
}

@ -532,7 +532,7 @@ return_normal:
kgdb_info[cpu].exception_state &=
~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
kgdb_info[cpu].enter_kgdb--;
smp_mb__before_atomic_dec();
smp_mb__before_atomic();
atomic_dec(&slaves_in_kgdb);
dbg_touch_watchdogs();
local_irq_restore(flags);

@ -656,7 +656,7 @@ kgdb_restore:
kgdb_info[cpu].exception_state &=
~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
kgdb_info[cpu].enter_kgdb--;
smp_mb__before_atomic_dec();
smp_mb__before_atomic();
atomic_dec(&masters_in_kgdb);
/* Free kgdb_active */
atomic_set(&kgdb_active, -1);

@ -495,7 +495,7 @@ int __usermodehelper_disable(enum umh_disable_depth depth)
static void helper_lock(void)
{
atomic_inc(&running_helpers);
smp_mb__after_atomic_inc();
smp_mb__after_atomic();
}

static void helper_unlock(void)

@ -165,7 +165,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
* do a write memory barrier, and then update the count, to
* make sure the vector is visible when count is set.
*/
smp_mb__before_atomic_inc();
smp_mb__before_atomic();
atomic_inc(&(vec)->count);
do_mb = 1;
}

@ -185,14 +185,14 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
* the new priority vec.
*/
if (do_mb)
smp_mb__after_atomic_inc();
smp_mb__after_atomic();

/*
* When removing from the vector, we decrement the counter first
* do a memory barrier and then clear the mask.
*/
atomic_dec(&(vec)->count);
smp_mb__after_atomic_inc();
smp_mb__after_atomic();
cpumask_clear_cpu(cpu, vec->mask);
}

@ -557,7 +557,7 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
bit = sync ? BDI_sync_congested : BDI_async_congested;
if (test_and_clear_bit(bit, &bdi->state))
atomic_dec(&nr_bdi_congested[sync]);
smp_mb__after_clear_bit();
smp_mb__after_atomic();
if (waitqueue_active(wqh))
wake_up(wqh);
}

@ -607,7 +607,7 @@ void unlock_page(struct page *page)
{
VM_BUG_ON(!PageLocked(page));
clear_bit_unlock(PG_locked, &page->flags);
smp_mb__after_clear_bit();
smp_mb__after_atomic();
wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

@ -624,7 +624,7 @@ void end_page_writeback(struct page *page)
if (!test_clear_page_writeback(page))
BUG();

smp_mb__after_clear_bit();
smp_mb__after_atomic();
wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

@ -252,7 +252,7 @@ static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
* we need to ensure there's a memory barrier after it. The bit
* *must* be set before we do the atomic_inc() on pvcc->inflight.
* There's no smp_mb__after_set_bit(), so it's this or abuse
* smp_mb__after_clear_bit().
* smp_mb__after_atomic().
*/
test_and_set_bit(BLOCKED, &pvcc->blocked);

@ -48,7 +48,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
}

clear_bit(HCI_INQUIRY, &hdev->flags);
smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
wake_up_bit(&hdev->flags, HCI_INQUIRY);

hci_dev_lock(hdev);

@ -1601,7 +1601,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
return;

smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
wake_up_bit(&hdev->flags, HCI_INQUIRY);

if (!test_bit(HCI_MGMT, &hdev->dev_flags))

@ -1312,7 +1312,7 @@ static int __dev_close_many(struct list_head *head)
* dev->stop() will invoke napi_disable() on all of it's
* napi_struct instances on this device.
*/
smp_mb__after_clear_bit(); /* Commit netif_running(). */
smp_mb__after_atomic(); /* Commit netif_running(). */
}

dev_deactivate_many(head);

@ -3255,7 +3255,7 @@ static void net_tx_action(struct softirq_action *h)
root_lock = qdisc_lock(q);
if (spin_trylock(root_lock)) {
smp_mb__before_clear_bit();
smp_mb__before_atomic();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
qdisc_run(q);

@ -3265,7 +3265,7 @@ static void net_tx_action(struct softirq_action *h)
&q->state)) {
__netif_reschedule(q);
} else {
smp_mb__before_clear_bit();
smp_mb__before_atomic();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
}

@ -4095,7 +4095,7 @@ void __napi_complete(struct napi_struct *n)
BUG_ON(n->gro_list);

list_del(&n->poll_list);
smp_mb__before_clear_bit();
smp_mb__before_atomic();
clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

@ -144,7 +144,7 @@ static void linkwatch_do_dev(struct net_device *dev)
* Make sure the above read is complete since it can be
* rewritten as soon as we clear the bit below.
*/
smp_mb__before_clear_bit();
smp_mb__before_atomic();

/* We are about to handle this device,
* so new events can be accepted

@ -529,7 +529,7 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
void inet_putpeer(struct inet_peer *p)
{
p->dtime = (__u32)jiffies;
smp_mb__before_atomic_dec();
smp_mb__before_atomic();
atomic_dec(&p->refcnt);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

@ -1889,10 +1889,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED, so we must
* test again the condition.
* We abuse smp_mb__after_clear_bit() because
* there is no smp_mb__after_set_bit() yet
*/
smp_mb__after_clear_bit();
smp_mb__after_atomic();
if (atomic_read(&sk->sk_wmem_alloc) > limit)
break;
}

@ -598,7 +598,7 @@ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
{
atomic64_set(&ic->i_ack_next, seq);
if (ack_required) {
smp_mb__before_clear_bit();
smp_mb__before_atomic();
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
}

@ -606,7 +606,7 @@ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
smp_mb__after_clear_bit();
smp_mb__after_atomic();

return atomic64_read(&ic->i_ack_next);
}

@ -429,7 +429,7 @@ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
{
atomic64_set(&ic->i_ack_next, seq);
if (ack_required) {
smp_mb__before_clear_bit();
smp_mb__before_atomic();
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
}

@ -437,7 +437,7 @@ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
{
clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
smp_mb__after_clear_bit();
smp_mb__after_atomic();

return atomic64_read(&ic->i_ack_next);
}

@ -107,7 +107,7 @@ static int acquire_in_xmit(struct rds_connection *conn)
static void release_in_xmit(struct rds_connection *conn)
{
clear_bit(RDS_IN_XMIT, &conn->c_flags);
smp_mb__after_clear_bit();
smp_mb__after_atomic();
/*
* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
* hot path and finding waiters is very rare. We don't want to walk

@ -661,7 +661,7 @@ void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
/* order flag updates with spin locks */
if (!list_empty(&list))
smp_mb__after_clear_bit();
smp_mb__after_atomic();

spin_unlock_irqrestore(&conn->c_lock, flags);

@ -691,7 +691,7 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
}

/* order flag updates with the rs lock */
smp_mb__after_clear_bit();
smp_mb__after_atomic();

spin_unlock_irqrestore(&rs->rs_lock, flags);

@ -93,7 +93,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
rm->m_ack_seq = tc->t_last_sent_nxt +
sizeof(struct rds_header) +
be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
smp_mb__before_clear_bit();
smp_mb__before_atomic();
set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags);
tc->t_last_expected_una = rm->m_ack_seq + 1;

@ -296,7 +296,7 @@ static void
rpcauth_unhash_cred_locked(struct rpc_cred *cred)
{
hlist_del_rcu(&cred->cr_hash);
smp_mb__before_clear_bit();
smp_mb__before_atomic();
clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
}

@ -124,7 +124,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
gss_get_ctx(ctx);
rcu_assign_pointer(gss_cred->gc_ctx, ctx);
set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
smp_mb__before_clear_bit();
smp_mb__before_atomic();
clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}

@ -257,10 +257,10 @@ void xprt_free_bc_request(struct rpc_rqst *req)
dprintk("RPC: free backchannel req=%p\n", req);

smp_mb__before_clear_bit();
smp_mb__before_atomic();
WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
smp_mb__after_clear_bit();
smp_mb__after_atomic();

if (!xprt_need_to_requeue(xprt)) {
/*

@ -232,9 +232,9 @@ static void xprt_clear_locked(struct rpc_xprt *xprt)
{
xprt->snd_task = NULL;
if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
smp_mb__before_clear_bit();
smp_mb__before_atomic();
clear_bit(XPRT_LOCKED, &xprt->state);
smp_mb__after_clear_bit();
smp_mb__after_atomic();
} else
queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

@ -851,11 +851,11 @@ static void xs_close(struct rpc_xprt *xprt)
xs_reset_transport(transport);
xprt->reestablish_timeout = 0;

smp_mb__before_clear_bit();
smp_mb__before_atomic();
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
clear_bit(XPRT_CLOSING, &xprt->state);
smp_mb__after_clear_bit();
smp_mb__after_atomic();
xprt_disconnect_done(xprt);
}

@ -1476,12 +1476,12 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
{
smp_mb__before_clear_bit();
smp_mb__before_atomic();
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
clear_bit(XPRT_CLOSING, &xprt->state);
smp_mb__after_clear_bit();
smp_mb__after_atomic();
}

static void xs_sock_mark_closed(struct rpc_xprt *xprt)

@ -1533,10 +1533,10 @@ static void xs_tcp_state_change(struct sock *sk)
xprt->connect_cookie++;
xprt->reestablish_timeout = 0;
set_bit(XPRT_CLOSING, &xprt->state);
smp_mb__before_clear_bit();
smp_mb__before_atomic();
clear_bit(XPRT_CONNECTED, &xprt->state);
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
smp_mb__after_clear_bit();
smp_mb__after_atomic();
xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
break;
case TCP_CLOSE_WAIT:

@ -1555,9 +1555,9 @@ static void xs_tcp_state_change(struct sock *sk)
case TCP_LAST_ACK:
set_bit(XPRT_CLOSING, &xprt->state);
xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
smp_mb__before_clear_bit();
smp_mb__before_atomic();
clear_bit(XPRT_CONNECTED, &xprt->state);
smp_mb__after_clear_bit();
smp_mb__after_atomic();
break;
case TCP_CLOSE:
xs_tcp_cancel_linger_timeout(xprt);

@ -1205,7 +1205,7 @@ restart:
sk->sk_state = TCP_ESTABLISHED;
sock_hold(newsk);

smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
unix_peer(sk) = newsk;

unix_state_unlock(sk);

@ -435,7 +435,7 @@ static int snd_bt87x_pcm_open(struct snd_pcm_substream *substream)
_error:
clear_bit(0, &chip->opened);
smp_mb__after_clear_bit();
smp_mb__after_atomic();
return err;
}

@ -450,7 +450,7 @@ static int snd_bt87x_close(struct snd_pcm_substream *substream)
chip->substream = NULL;
clear_bit(0, &chip->opened);
smp_mb__after_clear_bit();
smp_mb__after_atomic();
return 0;
}