IB/mlx5: Add sniffer support to steering
Add support for creating a sniffer rule. This rule receives all incoming and outgoing packets on the port. A user can create such a rule by using the IB_FLOW_ATTR_SNIFFER type.

Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit cc0e5d4235 (parent d9d4980af2)
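For context, here is a minimal sketch of how a userspace application might request such a sniffer rule through libibverbs, whose IBV_FLOW_ATTR_SNIFFER flow-attribute type is the verbs counterpart of the kernel's IB_FLOW_ATTR_SNIFFER handled by this patch. This is not part of the commit; the helper name attach_sniffer is hypothetical, and the QP is assumed to be an already-created raw packet QP (IBV_QPT_RAW_PACKET) on the sniffed port.

/* Sketch only: attach a sniffer flow rule to an existing raw packet QP. */
#include <stdio.h>
#include <infiniband/verbs.h>

static struct ibv_flow *attach_sniffer(struct ibv_qp *qp, uint8_t port)
{
	struct ibv_flow_attr attr = {
		.type         = IBV_FLOW_ATTR_SNIFFER, /* mirror all RX/TX traffic on the port */
		.size         = sizeof(attr),          /* no flow specs follow the header */
		.priority     = 0,
		.num_of_specs = 0,
		.port         = port,
		.flags        = 0,
	};
	struct ibv_flow *flow;

	flow = ibv_create_flow(qp, &attr);
	if (!flow)
		fprintf(stderr, "ibv_create_flow(SNIFFER) failed\n");
	return flow;	/* released later with ibv_destroy_flow(flow) */
}

In the kernel, such a request reaches mlx5_ib_create_flow(), which this patch extends to build the paired RX/TX sniffer rules shown in the hunks below.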
drivers/infiniband/hw/mlx5/main.c:

@@ -1649,6 +1649,7 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
 
 	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
 		mlx5_del_flow_rule(iter->rule);
+		put_flow_table(dev, iter->prio, true);
 		list_del(&iter->list);
 		kfree(iter);
 	}
@@ -1670,10 +1671,16 @@ static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
 	return priority;
 }
 
+enum flow_table_type {
+	MLX5_IB_FT_RX,
+	MLX5_IB_FT_TX
+};
+
 #define MLX5_FS_MAX_TYPES	 10
 #define MLX5_FS_MAX_ENTRIES	 32000UL
 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
-						struct ib_flow_attr *flow_attr)
+						struct ib_flow_attr *flow_attr,
+						enum flow_table_type ft_type)
 {
 	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
 	struct mlx5_flow_namespace *ns = NULL;
@@ -1704,6 +1711,19 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
 					     &num_entries,
 					     &num_groups);
 		prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
+	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+					allow_sniffer_and_nic_rx_shared_tir))
+			return ERR_PTR(-ENOTSUPP);
+
+		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
+					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
+					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);
+
+		prio = &dev->flow_db.sniffer[ft_type];
+		priority = 0;
+		num_entries = 1;
+		num_groups = 1;
 	}
 
 	if (!ns)
@@ -1875,6 +1895,43 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
 	return handler;
 }
 
+static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
+							struct mlx5_ib_flow_prio *ft_rx,
+							struct mlx5_ib_flow_prio *ft_tx,
+							struct mlx5_flow_destination *dst)
+{
+	struct mlx5_ib_flow_handler *handler_rx;
+	struct mlx5_ib_flow_handler *handler_tx;
+	int err;
+	static const struct ib_flow_attr flow_attr = {
+		.num_of_specs = 0,
+		.size = sizeof(flow_attr)
+	};
+
+	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
+	if (IS_ERR(handler_rx)) {
+		err = PTR_ERR(handler_rx);
+		goto err;
+	}
+
+	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
+	if (IS_ERR(handler_tx)) {
+		err = PTR_ERR(handler_tx);
+		goto err_tx;
+	}
+
+	list_add(&handler_tx->list, &handler_rx->list);
+
+	return handler_rx;
+
+err_tx:
+	mlx5_del_flow_rule(handler_rx->rule);
+	ft_rx->refcount--;
+	kfree(handler_rx);
+err:
+	return ERR_PTR(err);
+}
+
 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 					   struct ib_flow_attr *flow_attr,
 					   int domain)
@@ -1882,6 +1939,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
 	struct mlx5_ib_flow_handler *handler = NULL;
 	struct mlx5_flow_destination *dst = NULL;
+	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
 	struct mlx5_ib_flow_prio *ft_prio;
 	int err;
 
@@ -1899,11 +1957,19 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 
 	mutex_lock(&dev->flow_db.lock);
 
-	ft_prio = get_flow_table(dev, flow_attr);
+	ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
 	if (IS_ERR(ft_prio)) {
 		err = PTR_ERR(ft_prio);
 		goto unlock;
 	}
+	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
+		if (IS_ERR(ft_prio_tx)) {
+			err = PTR_ERR(ft_prio_tx);
+			ft_prio_tx = NULL;
+			goto destroy_ft;
+		}
+	}
 
 	dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 	dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;
@@ -1920,6 +1986,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
 		handler = create_leftovers_rule(dev, ft_prio, flow_attr,
 						dst);
+	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
 	} else {
 		err = -EINVAL;
 		goto destroy_ft;
@@ -1938,6 +2006,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 
 destroy_ft:
 	put_flow_table(dev, ft_prio, false);
+	if (ft_prio_tx)
+		put_flow_table(dev, ft_prio_tx, false);
 unlock:
 	mutex_unlock(&dev->flow_db.lock);
 	kfree(dst);
drivers/infiniband/hw/mlx5/mlx5_ib.h:

@@ -142,6 +142,7 @@ struct mlx5_ib_pd {
 #define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)
 
 #define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
+#define MLX5_IB_NUM_SNIFFER_FTS		2
 struct mlx5_ib_flow_prio {
 	struct mlx5_flow_table		*flow_table;
 	unsigned int			refcount;
@@ -156,6 +157,7 @@ struct mlx5_ib_flow_handler {
 
 struct mlx5_ib_flow_db {
 	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
+	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
 	/* Protect flow steering bypass flow tables
 	 * when add/del flow rules.
 	 * only single add/removal of flow steering rule could be done