mirror of
https://github.com/FEX-Emu/linux.git
synced 2025-01-25 03:59:29 +00:00
qede: Honor user request for Tx buffers
The driver always allocates the maximal number of Tx buffers, irrespective of the actual Tx ring configuration. Honor the user-requested ring size instead. Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com> Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
ba798b5b6d
commit
5a052d62ab
@ -1297,7 +1297,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
|
||||
}
|
||||
|
||||
/* Fill the entry in the SW ring and the BDs in the FW ring */
|
||||
idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
|
||||
idx = txq->sw_tx_prod;
|
||||
txq->sw_tx_ring.skbs[idx].skb = skb;
|
||||
first_bd = qed_chain_produce(&txq->tx_pbl);
|
||||
memset(first_bd, 0, sizeof(*first_bd));
|
||||
@ -1317,7 +1317,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
|
||||
|
||||
/* update the first BD with the actual num BDs */
|
||||
first_bd->data.nbds = 1;
|
||||
txq->sw_tx_prod++;
|
||||
txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
|
||||
/* 'next page' entries are counted in the producer value */
|
||||
val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
|
||||
txq->tx_db.data.bd_prod = val;
|
||||
@ -1351,7 +1351,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
|
||||
first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
|
||||
dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
|
||||
BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
|
||||
txq->sw_tx_cons++;
|
||||
txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
|
||||
txq->sw_tx_ring.skbs[idx].skb = NULL;
|
||||
|
||||
return 0;
|
||||
|
@ -99,7 +99,7 @@ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
|
||||
/* Unmap the data and free skb */
|
||||
int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
|
||||
{
|
||||
u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
|
||||
u16 idx = txq->sw_tx_cons;
|
||||
struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
|
||||
struct eth_tx_1st_bd *first_bd;
|
||||
struct eth_tx_bd *tx_data_bd;
|
||||
@ -156,7 +156,7 @@ static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
|
||||
struct eth_tx_1st_bd *first_bd,
|
||||
int nbd, bool data_split)
|
||||
{
|
||||
u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
|
||||
u16 idx = txq->sw_tx_prod;
|
||||
struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
|
||||
struct eth_tx_bd *tx_data_bd;
|
||||
int i, split_bd_len = 0;
|
||||
@ -333,8 +333,8 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
|
||||
struct sw_rx_data *metadata, u16 padding, u16 length)
|
||||
{
|
||||
struct qede_tx_queue *txq = fp->xdp_tx;
|
||||
u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
|
||||
struct eth_tx_1st_bd *first_bd;
|
||||
u16 idx = txq->sw_tx_prod;
|
||||
|
||||
if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
|
||||
txq->stopped_cnt++;
|
||||
@ -363,7 +363,7 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
|
||||
|
||||
txq->sw_tx_ring.xdp[idx].page = metadata->data;
|
||||
txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
|
||||
txq->sw_tx_prod++;
|
||||
txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
|
||||
|
||||
/* Mark the fastpath for future XDP doorbell */
|
||||
fp->xdp_xmit = 1;
|
||||
@ -393,14 +393,14 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
|
||||
|
||||
while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
|
||||
qed_chain_consume(&txq->tx_pbl);
|
||||
idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
|
||||
idx = txq->sw_tx_cons;
|
||||
|
||||
dma_unmap_page(&edev->pdev->dev,
|
||||
txq->sw_tx_ring.xdp[idx].mapping,
|
||||
PAGE_SIZE, DMA_BIDIRECTIONAL);
|
||||
__free_page(txq->sw_tx_ring.xdp[idx].page);
|
||||
|
||||
txq->sw_tx_cons++;
|
||||
txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
|
||||
txq->xmit_pkts++;
|
||||
}
|
||||
}
|
||||
@ -430,7 +430,7 @@ static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
|
||||
|
||||
bytes_compl += len;
|
||||
pkts_compl++;
|
||||
txq->sw_tx_cons++;
|
||||
txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
|
||||
txq->xmit_pkts++;
|
||||
}
|
||||
|
||||
@ -1455,7 +1455,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
#endif
|
||||
|
||||
/* Fill the entry in the SW ring and the BDs in the FW ring */
|
||||
idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
|
||||
idx = txq->sw_tx_prod;
|
||||
txq->sw_tx_ring.skbs[idx].skb = skb;
|
||||
first_bd = (struct eth_tx_1st_bd *)
|
||||
qed_chain_produce(&txq->tx_pbl);
|
||||
@ -1639,7 +1639,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
/* Advance packet producer only before sending the packet since mapping
|
||||
* of pages may fail.
|
||||
*/
|
||||
txq->sw_tx_prod++;
|
||||
txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
|
||||
|
||||
/* 'next page' entries are counted in the producer value */
|
||||
txq->tx_db.data.bd_prod =
|
||||
|
@ -1304,12 +1304,12 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
|
||||
|
||||
/* Allocate the parallel driver ring for Tx buffers */
|
||||
if (txq->is_xdp) {
|
||||
size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
|
||||
size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
|
||||
txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
|
||||
if (!txq->sw_tx_ring.xdp)
|
||||
goto err;
|
||||
} else {
|
||||
size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
|
||||
size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
|
||||
txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
|
||||
if (!txq->sw_tx_ring.skbs)
|
||||
goto err;
|
||||
@ -1319,7 +1319,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
|
||||
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
|
||||
QED_CHAIN_MODE_PBL,
|
||||
QED_CHAIN_CNT_TYPE_U16,
|
||||
TX_RING_SIZE,
|
||||
txq->num_tx_buffers,
|
||||
sizeof(*p_virt), &txq->tx_pbl);
|
||||
if (rc)
|
||||
goto err;
|
||||
|
Loading…
x
Reference in New Issue
Block a user