RDMA/bnxt_re: Add HW workaround for avoiding stall for UD QPs

HW stalls out after 0x800000 WQEs are posted for UD QPs.
To work around this problem, the driver sends a modify_qp command
to the HW at around the halfway mark (0x400000) so that the FW
can modify the QP context in the HW accordingly and prevent this
stall.
This workaround is needed for UD, QP1 and Raw Ethertype
packets. Added a counter to keep track of WQEs posted during post_send.

Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
Somnath Kotur 2017-05-22 03:15:36 -07:00 committed by Doug Ledford
parent 1c980b010f
commit 3fb755b3d5
4 changed files with 24 additions and 0 deletions

View File

@ -56,6 +56,8 @@
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) #define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024) #define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
#define BNXT_RE_UD_QP_HW_STALL 0x400000
struct bnxt_re_work { struct bnxt_re_work {
struct work_struct work; struct work_struct work;
unsigned long event; unsigned long event;

View File

@ -2075,6 +2075,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
return payload_sz; return payload_sz;
} }
static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
{
if ((qp->ib_qp.qp_type == IB_QPT_UD ||
qp->ib_qp.qp_type == IB_QPT_GSI ||
qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
int qp_attr_mask;
struct ib_qp_attr qp_attr;
qp_attr_mask = IB_QP_STATE;
qp_attr.qp_state = IB_QPS_RTS;
bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
qp->qplib_qp.wqe_cnt = 0;
}
}
static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp, struct bnxt_re_qp *qp,
struct ib_send_wr *wr) struct ib_send_wr *wr)
@ -2120,6 +2136,7 @@ bad:
wr = wr->next; wr = wr->next;
} }
bnxt_qplib_post_send_db(&qp->qplib_qp); bnxt_qplib_post_send_db(&qp->qplib_qp);
bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags); spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc; return rc;
} }
@ -2216,6 +2233,7 @@ bad:
wr = wr->next; wr = wr->next;
} }
bnxt_qplib_post_send_db(&qp->qplib_qp); bnxt_qplib_post_send_db(&qp->qplib_qp);
bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags); spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc; return rc;

View File

@ -1298,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
} }
sq->hwq.prod++; sq->hwq.prod++;
qp->wqe_cnt++;
done: done:
return rc; return rc;
} }

View File

@ -250,6 +250,7 @@ struct bnxt_qplib_qp {
u8 timeout; u8 timeout;
u8 retry_cnt; u8 retry_cnt;
u8 rnr_retry; u8 rnr_retry;
u64 wqe_cnt;
u32 min_rnr_timer; u32 min_rnr_timer;
u32 max_rd_atomic; u32 max_rd_atomic;
u32 max_dest_rd_atomic; u32 max_dest_rd_atomic;