Merge branch 'mlx5-fixes'
Or Gerlitz says:

====================
Mellanox mlx5e driver update, Nov 3 2015

This series contains a bunch of small fixes to the mlx5e driver from Achiad.

Changes from V0:
- removed the driver patch that dealt with IRQ affinity changes during
  NAPI poll, as this is a generic problem which needs a generic solution.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0561e8e878
@@ -617,5 +617,11 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
 	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
 }
 
+static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
+{
+	return min_t(int, mdev->priv.eq_table.num_comp_vectors,
+		     MLX5E_MAX_NUM_CHANNELS);
+}
+
 extern const struct ethtool_ops mlx5e_ethtool_ops;
 u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
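The hunks below replace open-coded min_t() computations with this new mlx5e_get_max_num_channels() helper. As a rough illustration (not part of the commit), a hypothetical caller that clamps a user-requested channel count would look like this:

#include <linux/kernel.h>	/* min_t() */

/* Hypothetical example only: clamp a requested channel count to what the
 * device's completion vectors allow, via the new helper.
 */
static int mlx5e_example_clamp_channels(struct mlx5_core_dev *mdev,
					int requested)
{
	return min_t(int, requested, mlx5e_get_max_num_channels(mdev));
}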
@@ -345,9 +345,8 @@ static void mlx5e_get_channels(struct net_device *dev,
 			       struct ethtool_channels *ch)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
 
-	ch->max_combined   = ncv;
+	ch->max_combined   = mlx5e_get_max_num_channels(priv->mdev);
 	ch->combined_count = priv->params.num_channels;
 }
 
@@ -355,7 +354,7 @@ static int mlx5e_set_channels(struct net_device *dev,
 			      struct ethtool_channels *ch)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+	int ncv = mlx5e_get_max_num_channels(priv->mdev);
 	unsigned int count = ch->combined_count;
 	bool was_opened;
 	int err = 0;
@@ -442,12 +442,12 @@ static void mlx5e_disable_rq(struct mlx5e_rq *rq)
 
 static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
 {
+	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
 	struct mlx5e_channel *c = rq->channel;
 	struct mlx5e_priv *priv = c->priv;
 	struct mlx5_wq_ll *wq = &rq->wq;
-	int i;
 
-	for (i = 0; i < 1000; i++) {
+	while (time_before(jiffies, exp_time)) {
 		if (wq->cur_sz >= priv->params.min_rx_wqes)
 			return 0;
 
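The hunk above bounds the wait by wall-clock time instead of a fixed iteration count. A minimal, self-contained sketch of that jiffies-based timeout pattern (hypothetical names, not from the commit):

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Hypothetical sketch: poll a caller-supplied condition for up to 20 seconds. */
static int example_wait_ready(bool (*ready)(void *ctx), void *ctx)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);

	while (time_before(jiffies, exp_time)) {
		if (ready(ctx))
			return 0;
		msleep(20);	/* back off between polls */
	}

	return -ETIMEDOUT;
}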
@@ -1404,6 +1404,12 @@ int mlx5e_close_locked(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
+	/* May already be CLOSED in case a previous configuration operation
+	 * (e.g RX/TX queue size change) that involves close&open failed.
+	 */
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		return 0;
+
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
 
 	mlx5e_redirect_rqts(priv);
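The guard added above makes mlx5e_close_locked() a no-op when a failed reconfiguration already left the device CLOSED. A minimal sketch of that state-bit idiom (hypothetical struct and bit, not from the commit):

#include <linux/bitops.h>

enum { EXAMPLE_STATE_OPENED };		/* hypothetical state bit */

struct example_priv {
	unsigned long state;
};

static int example_close(struct example_priv *priv)
{
	/* Already closed by an earlier failed close&open cycle: nothing to do. */
	if (!test_bit(EXAMPLE_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(EXAMPLE_STATE_OPENED, &priv->state);
	/* ... tear down queues, redirect RQTs, etc. ... */
	return 0;
}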
@@ -1837,7 +1843,7 @@ static int mlx5e_set_features(struct net_device *netdev,
 		mlx5e_disable_vlan_filter(priv);
 	}
 
-	return 0;
+	return err;
 }
 
 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
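The one-liner above stops mlx5e_set_features() from swallowing errors. A condensed, hypothetical sketch of the intent (names are illustrative, not the driver's actual flow):

#include <linux/netdevice.h>

/* Hypothetical helper standing in for the LRO/VLAN-filter reconfiguration. */
static int example_enable_lro(struct net_device *netdev)
{
	return 0;	/* or a negative errno if the queue reopen fails */
}

static int example_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	int err = 0;

	if (features & NETIF_F_LRO)
		err = example_enable_lro(netdev);

	/* The fix: report the helper's status instead of a hard-coded 0. */
	return err;
}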
@@ -1998,6 +2004,7 @@ static void mlx5e_build_netdev(struct net_device *netdev)
 	netdev->vlan_features    |= NETIF_F_LRO;
 
 	netdev->hw_features       = netdev->vlan_features;
+	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
 	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
@@ -2041,8 +2048,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 {
 	struct net_device *netdev;
 	struct mlx5e_priv *priv;
-	int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors,
-			MLX5E_MAX_NUM_CHANNELS);
+	int nch = mlx5e_get_max_num_channels(mdev);
 	int err;
 
 	if (mlx5e_check_required_hca_cap(mdev))
@@ -116,7 +116,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
 	 * headers and occur before the data gather.
 	 * Therefore these headers must be copied into the WQE
 	 */
-#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/)
+#define MLX5E_MIN_INLINE ETH_HLEN
 
 	if (bf && (skb_headlen(skb) <= sq->max_inline))
 		return skb_headlen(skb);
@@ -124,6 +124,21 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
 	return MLX5E_MIN_INLINE;
 }
 
+static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
+{
+	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
+	int cpy1_sz = 2 * ETH_ALEN;
+	int cpy2_sz = ihs - cpy1_sz;
+
+	skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
+	skb_pull_inline(skb, cpy1_sz);
+	vhdr->h_vlan_proto = skb->vlan_proto;
+	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
+	skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
+				  cpy2_sz);
+	skb_pull_inline(skb, cpy2_sz);
+}
+
 static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 {
 	struct mlx5_wq_cyc *wq = &sq->wq;
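For reference (not part of the commit), the inline header that mlx5e_insert_vlan() builds for a tagged frame is laid out as sketched below; the caller in the next hunk accounts for the extra 4 bytes with ihs += VLAN_HLEN.

/* Illustrative layout only, restating what mlx5e_insert_vlan() writes:
 *
 *   start +  0 : destination MAC (6 bytes)   copied from the skb
 *   start +  6 : source MAC      (6 bytes)   copied from the skb
 *   start + 12 : TPID            (2 bytes)   skb->vlan_proto
 *   start + 14 : TCI             (2 bytes)   skb_vlan_tag_get(skb)
 *   start + 16 : original EtherType and the rest of the inlined headers
 *                (ihs - 2 * ETH_ALEN bytes in total)
 *
 * i.e. a struct vlan_ethhdr followed by the remaining original headers,
 * so the inline size grows by VLAN_HLEN (4 bytes).
 */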
@@ -175,8 +190,13 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 			       ETH_ZLEN);
 	}
 
-	skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
-	skb_pull_inline(skb, ihs);
+	if (skb_vlan_tag_present(skb)) {
+		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
+		ihs += VLAN_HLEN;
+	} else {
+		skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+		skb_pull_inline(skb, ihs);
+	}
 
 	eseg->inline_hdr_sz = cpu_to_be16(ihs);
 
@@ -382,10 +382,10 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 		 name, pci_name(dev->pdev));
 
 	eq->eqn = out.eq_number;
-	eq->irqn = vecidx;
+	eq->irqn = priv->msix_arr[vecidx].vector;
 	eq->dev = dev;
 	eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
-	err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
+	err = request_irq(eq->irqn, mlx5_msix_handler, 0,
 			  priv->irq_info[vecidx].name, eq);
 	if (err)
 		goto err_eq;
@@ -421,12 +421,12 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 	int err;
 
 	mlx5_debug_eq_remove(dev, eq);
-	free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
+	free_irq(eq->irqn, eq);
 	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
 	if (err)
 		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
 			       eq->eqn);
-	synchronize_irq(dev->priv.msix_arr[eq->irqn].vector);
+	synchronize_irq(eq->irqn);
 	mlx5_buf_free(dev, &eq->buf);
 
 	return err;
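After the two eq.c hunks, eq->irqn holds the Linux IRQ number taken from the MSI-X table rather than the vector index, so the teardown path no longer dereferences priv->msix_arr. A minimal sketch of that request/free pairing (hypothetical names, not the driver's API):

#include <linux/interrupt.h>

struct example_eq {
	unsigned int irqn;	/* Linux IRQ number, not the MSI-X vector index */
};

/* "vector" is the IRQ number from the msix_entries[] table filled in by
 * pci_enable_msix_range() (hypothetical, simplified setup path).
 */
static int example_eq_setup(struct example_eq *eq, unsigned int vector,
			    irq_handler_t handler, const char *name)
{
	eq->irqn = vector;
	return request_irq(eq->irqn, handler, 0, name, eq);
}

static void example_eq_teardown(struct example_eq *eq)
{
	free_irq(eq->irqn, eq);
	/* in the driver, the EQ destroy command sits between these two calls */
	synchronize_irq(eq->irqn);
}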