diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 7382be11afca..4649530ac4e8 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1034,7 +1034,10 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 		/* see post_send() */
 		barrier();
 		rvt_put_swqe(wqe);
-		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+		rvt_qp_swqe_complete(qp,
+				     wqe,
+				     ib_hfi1_wc_opcode[wqe->wr.opcode],
+				     IB_WC_SUCCESS);
 	}
 	/*
 	 * If we were waiting for sends to complete before re-sending,
@@ -1081,7 +1084,10 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 		qp->s_last = s_last;
 		/* see post_send() */
 		barrier();
-		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+		rvt_qp_swqe_complete(qp,
+				     wqe,
+				     ib_hfi1_wc_opcode[wqe->wr.opcode],
+				     IB_WC_SUCCESS);
 	} else {
 		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index aa15bcbfb079..d2eb793b34af 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -920,7 +920,10 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	    qp->ibqp.qp_type == IB_QPT_GSI)
 		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
 
-	rvt_qp_swqe_complete(qp, wqe, status);
+	rvt_qp_swqe_complete(qp,
+			     wqe,
+			     ib_hfi1_wc_opcode[wqe->wr.opcode],
+			     status);
 
 	if (qp->s_acked == old_last)
 		qp->s_acked = last;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 222315fadab1..815cb44b7693 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -296,6 +296,22 @@ static inline bool wss_exceeds_threshold(void)
 	return atomic_read(&wss.total_count) >= wss.threshold;
 }
 
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
+	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+	[IB_WR_SEND] = IB_WC_SEND,
+	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
+	[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
+	[IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
+	[IB_WR_REG_MR] = IB_WC_REG_MR
+};
+
 /*
  * Length of header by opcode, 0 --> not supported
  */
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 12658e3fe154..023498745b8a 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -938,7 +938,10 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 		/* see post_send() */
 		barrier();
 		rvt_put_swqe(wqe);
-		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+		rvt_qp_swqe_complete(qp,
+				     wqe,
+				     ib_qib_wc_opcode[wqe->wr.opcode],
+				     IB_WC_SUCCESS);
 	}
 	/*
 	 * If we were waiting for sends to complete before resending,
@@ -983,7 +986,10 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 		qp->s_last = s_last;
 		/* see post_send() */
 		barrier();
-		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+		rvt_qp_swqe_complete(qp,
+				     wqe,
+				     ib_qib_wc_opcode[wqe->wr.opcode],
+				     IB_WC_SUCCESS);
 	} else
 		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
 
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 17655cc3e6fe..6e1adf709483 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -769,7 +769,10 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	    qp->ibqp.qp_type == IB_QPT_GSI)
 		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
 
-	rvt_qp_swqe_complete(qp, wqe, status);
+	rvt_qp_swqe_complete(qp,
+			     wqe,
+			     ib_qib_wc_opcode[wqe->wr.opcode],
+			     status);
 
 	if (qp->s_acked == old_last)
 		qp->s_acked = last;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 83f8b5f24381..e120efefb8d7 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -113,6 +113,19 @@ static unsigned int ib_qib_disable_sma;
 module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
 
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_qib_wc_opcode[] = {
+	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+	[IB_WR_SEND] = IB_WC_SEND,
+	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+};
+
 /*
  * System image GUID.
  */
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index f5ad8d4bfb39..28fb7241ae6b 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -117,23 +117,6 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
 };
 EXPORT_SYMBOL(ib_rvt_state_ops);
 
-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_rvt_wc_opcode[] = {
-	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
-	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
-	[IB_WR_SEND] = IB_WC_SEND,
-	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
-	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
-	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
-	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
-	[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
-	[IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
-	[IB_WR_REG_MR] = IB_WC_REG_MR
-};
-EXPORT_SYMBOL(ib_rvt_wc_opcode);
-
 static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
 			 gfp_t gfp)
 {
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index f3816396c76a..3cdd9e26e96e 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -574,6 +574,7 @@ extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
 static inline void rvt_qp_swqe_complete(
 	struct rvt_qp *qp,
 	struct rvt_swqe *wqe,
+	enum ib_wc_opcode opcode,
 	enum ib_wc_status status)
 {
 	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
@@ -586,7 +587,7 @@ static inline void rvt_qp_swqe_complete(
 		memset(&wc, 0, sizeof(wc));
 		wc.wr_id = wqe->wr.wr_id;
 		wc.status = status;
-		wc.opcode = ib_rvt_wc_opcode[wqe->wr.opcode];
+		wc.opcode = opcode;
 		wc.qp = &qp->ibqp;
 		wc.byte_len = wqe->length;
 		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
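
Illustration (not part of the patch): after this change, rvt_qp_swqe_complete() no longer looks up the completion opcode in the shared ib_rvt_wc_opcode[] table, so each rdmavt-based driver translates the posted work-request opcode itself and passes the result in. A minimal sketch of a caller, using the hfi1 table added above; the function name example_send_complete is hypothetical:

	static void example_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe)
	{
		/* driver-private table: posted WR opcode -> completion (WC) opcode */
		enum ib_wc_opcode op = ib_hfi1_wc_opcode[wqe->wr.opcode];

		/* generate the send completion with the driver-chosen opcode */
		rvt_qp_swqe_complete(qp, wqe, op, IB_WC_SUCCESS);
	}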