Merge branch 'mlx5 IPsec packet offload support (Part II)'

Leon Romanovsky says:
============
This is the second part, with the implementation of packet offload.
============

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>

commit 1de8fda46f
@@ -84,7 +84,8 @@ enum {
    MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
    MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
    MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
    MLX5E_ACCEL_FS_ESP_FT_LEVEL,
    MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
#endif
};

@@ -45,55 +45,9 @@ static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
    return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}

struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
                                              unsigned int handle)
static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
{
    struct mlx5e_ipsec_sa_entry *sa_entry;
    struct xfrm_state *ret = NULL;

    rcu_read_lock();
    hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
        if (sa_entry->handle == handle) {
            ret = sa_entry->x;
            xfrm_state_hold(ret);
            break;
        }
    rcu_read_unlock();

    return ret;
}

static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
{
    unsigned int handle = sa_entry->ipsec_obj_id;
    struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
    struct mlx5e_ipsec_sa_entry *_sa_entry;
    unsigned long flags;

    rcu_read_lock();
    hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle)
        if (_sa_entry->handle == handle) {
            rcu_read_unlock();
            return -EEXIST;
        }
    rcu_read_unlock();

    spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
    sa_entry->handle = handle;
    hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
    spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);

    return 0;
}

static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
{
    struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
    unsigned long flags;

    spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
    hash_del_rcu(&sa_entry->hlist);
    spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
    return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}

static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -129,9 +83,33 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
    return false;
}

static void
mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
                                   struct mlx5_accel_esp_xfrm_attrs *attrs)
static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
                                    struct mlx5_accel_esp_xfrm_attrs *attrs)
{
    struct xfrm_state *x = sa_entry->x;

    attrs->hard_packet_limit = x->lft.hard_packet_limit;
    if (x->lft.soft_packet_limit == XFRM_INF)
        return;

    /* Hardware decrements the hard_packet_limit counter during
     * operation, while it fires an event when soft_packet_limit
     * is reached. This means we need to substitute the numbers
     * in order to properly count the soft limit.
     *
     * As an example:
     * The XFRM user sets a soft limit of 2 and a hard limit of 9 and
     * expects to see the soft event after 2 packets and the hard event
     * after 9 packets. In our case, the hard limit will be set
     * to 9 and the soft limit comparator to 7, so the user gets the
     * soft event after 2 packets.
     */
    attrs->soft_packet_limit =
        x->lft.hard_packet_limit - x->lft.soft_packet_limit;
}
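
A standalone sketch of the comparator substitution described in the comment above (hypothetical values, not driver code): the device counts a single value down from the hard limit, so the soft "limit" is armed as a comparator at hard - soft.

    #include <stdio.h>

    /* Model: HW decrements one counter from 'hard' toward zero (hard event
     * at zero) and fires the soft event when it crosses the comparator.
     */
    int main(void)
    {
        unsigned long long hard = 9, soft = 2;
        unsigned long long comparator = hard - soft; /* armed in HW: 7 */

        /* soft event after hard - comparator = 2 packets,
         * hard event after all 9 packets have passed
         */
        printf("comparator=%llu\n", comparator);
        return 0;
    }
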
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
                                        struct mlx5_accel_esp_xfrm_attrs *attrs)
{
    struct xfrm_state *x = sa_entry->x;
    struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
@@ -157,6 +135,8 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
    memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
           sizeof(aes_gcm->salt));

    attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */

    /* iv len */
    aes_gcm->icv_len = x->aead->alg_icv_len;

@@ -177,6 +157,9 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
    memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
    attrs->family = x->props.family;
    attrs->type = x->xso.type;
    attrs->reqid = x->props.reqid;

    mlx5e_ipsec_init_limits(sa_entry, attrs);
}

static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -208,11 +191,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
        netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
        return -EINVAL;
    }
    if (x->props.mode != XFRM_MODE_TRANSPORT &&
        x->props.mode != XFRM_MODE_TUNNEL) {
        dev_info(&netdev->dev, "Only transport and tunnel xfrm states may be offloaded\n");
        return -EINVAL;
    }
    if (x->id.proto != IPPROTO_ESP) {
        netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
        return -EINVAL;
@@ -246,11 +224,32 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
        netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
        return -EINVAL;
    }
    if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
        netdev_info(netdev, "Unsupported xfrm offload type\n");
        return -EINVAL;
    }
    if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
    switch (x->xso.type) {
    case XFRM_DEV_OFFLOAD_CRYPTO:
        if (!(mlx5_ipsec_device_caps(priv->mdev) &
              MLX5_IPSEC_CAP_CRYPTO)) {
            netdev_info(netdev, "Crypto offload is not supported\n");
            return -EINVAL;
        }

        if (x->props.mode != XFRM_MODE_TRANSPORT &&
            x->props.mode != XFRM_MODE_TUNNEL) {
            netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
            return -EINVAL;
        }
        break;
    case XFRM_DEV_OFFLOAD_PACKET:
        if (!(mlx5_ipsec_device_caps(priv->mdev) &
              MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
            netdev_info(netdev, "Packet offload is not supported\n");
            return -EINVAL;
        }

        if (x->props.mode != XFRM_MODE_TRANSPORT) {
            netdev_info(netdev, "Only transport xfrm states may be offloaded in packet mode\n");
            return -EINVAL;
        }

        if (x->replay_esn && x->replay_esn->replay_window != 32 &&
            x->replay_esn->replay_window != 64 &&
            x->replay_esn->replay_window != 128 &&
@@ -260,6 +259,31 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
                        x->replay_esn->replay_window);
            return -EINVAL;
        }

        if (!x->props.reqid) {
            netdev_info(netdev, "Cannot offload without reqid\n");
            return -EINVAL;
        }

        if (x->lft.hard_byte_limit != XFRM_INF ||
            x->lft.soft_byte_limit != XFRM_INF) {
            netdev_info(netdev,
                        "Device doesn't support limits in bytes\n");
            return -EINVAL;
        }

        if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
            x->lft.hard_packet_limit != XFRM_INF) {
            /* XFRM stack doesn't prevent such configuration :(. */
            netdev_info(netdev,
                        "Hard packet limit must be greater than soft one\n");
            return -EINVAL;
        }
        break;
    default:
        netdev_info(netdev, "Unsupported xfrm offload type %d\n",
                    x->xso.type);
        return -EINVAL;
    }
    return 0;
}
@@ -278,6 +302,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
    struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
    struct net_device *netdev = x->xso.real_dev;
    struct mlx5e_ipsec *ipsec;
    struct mlx5e_priv *priv;
    int err;

@@ -285,6 +310,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
    if (!priv->ipsec)
        return -EOPNOTSUPP;

    ipsec = priv->ipsec;
    err = mlx5e_xfrm_validate_state(x);
    if (err)
        return err;
@@ -296,7 +322,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
    }

    sa_entry->x = x;
    sa_entry->ipsec = priv->ipsec;
    sa_entry->ipsec = ipsec;

    /* check esn */
    mlx5e_ipsec_update_esn_state(sa_entry);
@@ -311,18 +337,22 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
    if (err)
        goto err_hw_ctx;

    if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) {
        err = mlx5e_ipsec_sadb_rx_add(sa_entry);
        if (err)
            goto err_add_rule;
    } else {
    /* We use *_bh() variant because xfrm_timer_handler(), which runs
     * in softirq context, can reach our state delete logic and we need
     * xa_erase_bh() there.
     */
    err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
                       GFP_KERNEL);
    if (err)
        goto err_add_rule;

    if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT)
        sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
                mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
    }

    INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
    x->xso.offload_handle = (unsigned long)sa_entry;
    goto out;
    return 0;

err_add_rule:
    mlx5e_accel_ipsec_fs_del_rule(sa_entry);
@@ -337,9 +367,11 @@ out:
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
    struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
    struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
    struct mlx5e_ipsec_sa_entry *old;

    if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
        mlx5e_ipsec_sadb_rx_del(sa_entry);
    old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
    WARN_ON(old != sa_entry);
}
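
A minimal sketch of the xarray pattern used above (kernel-style, with a hypothetical entry type). The _bh variants matter because, per the comment in mlx5e_xfrm_add_state(), xfrm_timer_handler() can reach the delete path from softirq context:

    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC(sadb); /* keyed by the HW IPsec object ID */

    static int sadb_add(u32 id, void *entry)
    {
        /* Fails with -EBUSY if 'id' is already present; takes the xarray
         * lock with softirqs disabled so it cannot race a timer-side erase.
         */
        return xa_insert_bh(&sadb, id, entry, GFP_KERNEL);
    }

    static void *sadb_del(u32 id)
    {
        return xa_erase_bh(&sadb, id); /* returns the removed entry or NULL */
    }
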
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
@@ -366,8 +398,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
    if (!ipsec)
        return;

    hash_init(ipsec->sadb_rx);
    spin_lock_init(&ipsec->sadb_rx_lock);
    xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
    ipsec->mdev = priv->mdev;
    ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
                                        priv->netdev->name);
@@ -446,6 +477,122 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
    queue_work(sa_entry->ipsec->wq, &modify_work->work);
}

static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
{
    struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
    int err;

    lockdep_assert_held(&x->lock);

    if (sa_entry->attrs.soft_packet_limit == XFRM_INF)
        /* Limits are not configured, as the soft limit
         * must be lower than the hard limit.
         */
        return;

    err = mlx5e_ipsec_aso_query(sa_entry, NULL);
    if (err)
        return;

    mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
}

static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x)
{
    struct net_device *netdev = x->xdo.real_dev;

    if (x->type != XFRM_POLICY_TYPE_MAIN) {
        netdev_info(netdev, "Cannot offload non-main policy types\n");
        return -EINVAL;
    }

    /* Please pay attention that we support only one template */
    if (x->xfrm_nr > 1) {
        netdev_info(netdev, "Cannot offload more than one template\n");
        return -EINVAL;
    }

    if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
        x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
        netdev_info(netdev, "Cannot offload forward policy\n");
        return -EINVAL;
    }

    if (!x->xfrm_vec[0].reqid) {
        netdev_info(netdev, "Cannot offload policy without reqid\n");
        return -EINVAL;
    }

    if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
        netdev_info(netdev, "Unsupported xfrm offload type\n");
        return -EINVAL;
    }

    return 0;
}

static void
mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
                                  struct mlx5_accel_pol_xfrm_attrs *attrs)
{
    struct xfrm_policy *x = pol_entry->x;
    struct xfrm_selector *sel;

    sel = &x->selector;
    memset(attrs, 0, sizeof(*attrs));

    memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
    memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
    attrs->family = sel->family;
    attrs->dir = x->xdo.dir;
    attrs->action = x->action;
    attrs->type = XFRM_DEV_OFFLOAD_PACKET;
    attrs->reqid = x->xfrm_vec[0].reqid;
}

static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
{
    struct net_device *netdev = x->xdo.real_dev;
    struct mlx5e_ipsec_pol_entry *pol_entry;
    struct mlx5e_priv *priv;
    int err;

    priv = netdev_priv(netdev);
    if (!priv->ipsec)
        return -EOPNOTSUPP;

    err = mlx5e_xfrm_validate_policy(x);
    if (err)
        return err;

    pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
    if (!pol_entry)
        return -ENOMEM;

    pol_entry->x = x;
    pol_entry->ipsec = priv->ipsec;

    mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
    err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
    if (err)
        goto err_fs;

    x->xdo.offload_handle = (unsigned long)pol_entry;
    return 0;

err_fs:
    kfree(pol_entry);
    return err;
}

static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
{
    struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);

    mlx5e_accel_ipsec_fs_del_pol(pol_entry);
    kfree(pol_entry);
}

static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
    .xdo_dev_state_add = mlx5e_xfrm_add_state,
    .xdo_dev_state_delete = mlx5e_xfrm_del_state,
@@ -454,6 +601,18 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
    .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};

static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
    .xdo_dev_state_add = mlx5e_xfrm_add_state,
    .xdo_dev_state_delete = mlx5e_xfrm_del_state,
    .xdo_dev_state_free = mlx5e_xfrm_free_state,
    .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
    .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,

    .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
    .xdo_dev_policy_add = mlx5e_xfrm_add_policy,
    .xdo_dev_policy_free = mlx5e_xfrm_free_policy,
};

void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
    struct mlx5_core_dev *mdev = priv->mdev;
@@ -463,7 +622,12 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
        return;

    mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
    netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;

    if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
        netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
    else
        netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;

    netdev->features |= NETIF_F_HW_ESP;
    netdev->hw_enc_features |= NETIF_F_HW_ESP;

@@ -34,8 +34,6 @@
#ifndef __MLX5E_IPSEC_H__
#define __MLX5E_IPSEC_H__

#ifdef CONFIG_MLX5_EN_IPSEC

#include <linux/mlx5/device.h>
#include <net/xfrm.h>
#include <linux/idr.h>
@@ -76,6 +74,10 @@ struct mlx5_accel_esp_xfrm_attrs {
    u8 type : 2;
    u8 family;
    u32 replay_window;
    u32 authsize;
    u32 reqid;
    u64 hard_packet_limit;
    u64 soft_packet_limit;
};

enum mlx5_ipsec_cap {
@@ -86,6 +88,17 @@ enum mlx5_ipsec_cap {

struct mlx5e_priv;

struct mlx5e_ipsec_hw_stats {
    u64 ipsec_rx_pkts;
    u64 ipsec_rx_bytes;
    u64 ipsec_rx_drop_pkts;
    u64 ipsec_rx_drop_bytes;
    u64 ipsec_tx_pkts;
    u64 ipsec_tx_bytes;
    u64 ipsec_tx_drop_pkts;
    u64 ipsec_tx_drop_bytes;
};

struct mlx5e_ipsec_sw_stats {
    atomic64_t ipsec_rx_drop_sp_alloc;
    atomic64_t ipsec_rx_drop_sadb_miss;
@@ -99,23 +112,35 @@ struct mlx5e_ipsec_sw_stats {
struct mlx5e_ipsec_rx;
struct mlx5e_ipsec_tx;

struct mlx5e_ipsec_work {
    struct work_struct work;
    struct mlx5e_ipsec *ipsec;
    u32 id;
};

struct mlx5e_ipsec_aso {
    u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
    dma_addr_t dma_addr;
    struct mlx5_aso *aso;
    /* IPsec ASO caches data on every query call,
     * so in nested calls, we can use this boolean to save
     * recursive calls to mlx5e_ipsec_aso_query()
     */
    u8 use_cache : 1;
};
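
A tiny illustration of the use_cache guard described in the comment above (hypothetical types; in the driver, mlx5e_ipsec_handle_event() sets the bit around nested mlx5e_ipsec_aso_query() calls while holding the state lock):

    struct aso_cache {
        unsigned char ctx[64]; /* snapshot of the last HW readback */
        unsigned char use_cache : 1;
    };

    static int aso_query(struct aso_cache *aso)
    {
        if (aso->use_cache)
            return 0; /* reuse aso->ctx from the previous query */
        /* ...post the ASO WQE, poll the CQ, refresh aso->ctx... */
        return 0;
    }
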
struct mlx5e_ipsec {
    struct mlx5_core_dev *mdev;
    DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
    spinlock_t sadb_rx_lock; /* Protects sadb_rx */
    struct xarray sadb;
    struct mlx5e_ipsec_sw_stats sw_stats;
    struct mlx5e_ipsec_hw_stats hw_stats;
    struct workqueue_struct *wq;
    struct mlx5e_flow_steering *fs;
    struct mlx5e_ipsec_rx *rx_ipv4;
    struct mlx5e_ipsec_rx *rx_ipv6;
    struct mlx5e_ipsec_tx *tx;
    struct mlx5e_ipsec_aso *aso;
    struct notifier_block nb;
};

struct mlx5e_ipsec_esn_state {
@@ -127,6 +152,7 @@ struct mlx5e_ipsec_esn_state {
struct mlx5e_ipsec_rule {
    struct mlx5_flow_handle *rule;
    struct mlx5_modify_hdr *modify_hdr;
    struct mlx5_pkt_reformat *pkt_reformat;
};

struct mlx5e_ipsec_modify_state_work {
@@ -135,9 +161,7 @@ struct mlx5e_ipsec_modify_state_work {
};

struct mlx5e_ipsec_sa_entry {
    struct hlist_node hlist; /* Item in SADB_RX hashtable */
    struct mlx5e_ipsec_esn_state esn_state;
    unsigned int handle; /* Handle in SADB_RX */
    struct xfrm_state *x;
    struct mlx5e_ipsec *ipsec;
    struct mlx5_accel_esp_xfrm_attrs attrs;
@@ -149,17 +173,43 @@ struct mlx5e_ipsec_sa_entry {
    struct mlx5e_ipsec_modify_state_work modify_work;
};

struct mlx5_accel_pol_xfrm_attrs {
    union {
        __be32 a4;
        __be32 a6[4];
    } saddr;

    union {
        __be32 a4;
        __be32 a6[4];
    } daddr;

    u8 family;
    u8 action;
    u8 type : 2;
    u8 dir : 2;
    u32 reqid;
};

struct mlx5e_ipsec_pol_entry {
    struct xfrm_policy *x;
    struct mlx5e_ipsec *ipsec;
    struct mlx5e_ipsec_rule ipsec_rule;
    struct mlx5_accel_pol_xfrm_attrs attrs;
};

#ifdef CONFIG_MLX5_EN_IPSEC

void mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);

struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
                                              unsigned int handle);

void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);

int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
@@ -172,11 +222,27 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);

int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
                          struct mlx5_wqe_aso_ctrl_seg *data);
void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
                                   u64 *packets);

void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
                                     void *ipsec_stats);

void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
                                        struct mlx5_accel_esp_xfrm_attrs *attrs);
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
    return sa_entry->ipsec->mdev;
}

static inline struct mlx5_core_dev *
mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry)
{
    return pol_entry->ipsec->mdev;
}
#else
static inline void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{

@@ -9,8 +9,14 @@

#define NUM_IPSEC_FTE BIT(15)

struct mlx5e_ipsec_fc {
    struct mlx5_fc *cnt;
    struct mlx5_fc *drop;
};

struct mlx5e_ipsec_ft {
    struct mutex mutex; /* Protect changes to this struct */
    struct mlx5_flow_table *pol;
    struct mlx5_flow_table *sa;
    struct mlx5_flow_table *status;
    u32 refcnt;
@@ -23,13 +29,17 @@ struct mlx5e_ipsec_miss {

struct mlx5e_ipsec_rx {
    struct mlx5e_ipsec_ft ft;
    struct mlx5e_ipsec_miss pol;
    struct mlx5e_ipsec_miss sa;
    struct mlx5e_ipsec_rule status;
    struct mlx5e_ipsec_fc *fc;
};

struct mlx5e_ipsec_tx {
    struct mlx5e_ipsec_ft ft;
    struct mlx5e_ipsec_miss pol;
    struct mlx5_flow_namespace *ns;
    struct mlx5e_ipsec_fc *fc;
};

/* IPsec RX flow steering */
@@ -90,9 +100,10 @@ static int ipsec_status_rule(struct mlx5_core_dev *mdev,

    /* create fte */
    flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
                      MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                      MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                      MLX5_FLOW_CONTEXT_ACTION_COUNT;
    flow_act.modify_hdr = modify_hdr;
    fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 1);
    fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
    if (IS_ERR(fte)) {
        err = PTR_ERR(fte);
        mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
@@ -157,6 +168,10 @@ out:

static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
{
    mlx5_del_flow_rules(rx->pol.rule);
    mlx5_destroy_flow_group(rx->pol.group);
    mlx5_destroy_flow_table(rx->ft.pol);

    mlx5_del_flow_rules(rx->sa.rule);
    mlx5_destroy_flow_group(rx->sa.group);
    mlx5_destroy_flow_table(rx->ft.sa);
@@ -171,7 +186,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
{
    struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
    struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
    struct mlx5_flow_destination dest;
    struct mlx5_flow_destination dest[2];
    struct mlx5_flow_table *ft;
    int err;

@@ -182,26 +197,47 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,

    rx->ft.status = ft;

    dest = mlx5_ttc_get_default_dest(ttc, family2tt(family));
    err = ipsec_status_rule(mdev, rx, &dest);
    dest[0] = mlx5_ttc_get_default_dest(ttc, family2tt(family));
    dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
    dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
    err = ipsec_status_rule(mdev, rx, dest);
    if (err)
        goto err_add;

    /* Create FT */
    ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO,
                         1);
                         2);
    if (IS_ERR(ft)) {
        err = PTR_ERR(ft);
        goto err_fs_ft;
    }
    rx->ft.sa = ft;

    err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &dest);
    err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
    if (err)
        goto err_fs;

    ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_NIC_PRIO,
                         2);
    if (IS_ERR(ft)) {
        err = PTR_ERR(ft);
        goto err_pol_ft;
    }
    rx->ft.pol = ft;
    memset(dest, 0x00, 2 * sizeof(*dest));
    dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
    dest[0].ft = rx->ft.sa;
    err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
    if (err)
        goto err_pol_miss;

    return 0;

err_pol_miss:
    mlx5_destroy_flow_table(rx->ft.pol);
err_pol_ft:
    mlx5_del_flow_rules(rx->sa.rule);
    mlx5_destroy_flow_group(rx->sa.group);
err_fs:
    mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
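
For orientation, a rough sketch (one reading of rx_create() above, not part of the commit) of the RX table chain this function builds; packets arrive from the TTC table and fall through on misses:

    ttc -> ft.pol -(miss)-> ft.sa -(miss)-> ttc default dest
            |                |
            policy hit       SA hit: decrypt -> ft.status
            (allow/drop)     ft.status: count + copy syndrome -> ttc default dest
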
@@ -236,7 +272,7 @@ static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,

    /* connect */
    dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
    dest.ft = rx->ft.sa;
    dest.ft = rx->ft.pol;
    mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest);

skip:
@@ -277,14 +313,34 @@ out:
/* IPsec TX flow steering */
static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
    struct mlx5_flow_destination dest = {};
    struct mlx5_flow_table *ft;
    int err;

    ft = ipsec_ft_create(tx->ns, 0, 0, 1);
    ft = ipsec_ft_create(tx->ns, 1, 0, 4);
    if (IS_ERR(ft))
        return PTR_ERR(ft);

    tx->ft.sa = ft;

    ft = ipsec_ft_create(tx->ns, 0, 0, 2);
    if (IS_ERR(ft)) {
        err = PTR_ERR(ft);
        goto err_pol_ft;
    }
    tx->ft.pol = ft;
    dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
    dest.ft = tx->ft.sa;
    err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
    if (err)
        goto err_pol_miss;
    return 0;

err_pol_miss:
    mlx5_destroy_flow_table(tx->ft.pol);
err_pol_ft:
    mlx5_destroy_flow_table(tx->ft.sa);
    return err;
}
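
A similar sketch for the TX side built by tx_create() above (again, my reading): the egress namespace now holds two chained tables, the policy table at level 0 feeding the SA table at level 1:

    send path -> tx->ft.pol -(policy hit: stamp metadata, fwd)-> tx->ft.sa -> encrypt
                 tx->ft.pol -(miss)-> tx->ft.sa
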
static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
@@ -318,6 +374,9 @@ static void tx_ft_put(struct mlx5e_ipsec *ipsec)
    if (tx->ft.refcnt)
        goto out;

    mlx5_del_flow_rules(tx->pol.rule);
    mlx5_destroy_flow_group(tx->pol.group);
    mlx5_destroy_flow_table(tx->ft.pol);
    mlx5_destroy_flow_table(tx->ft.sa);
out:
    mutex_unlock(&tx->ft.mutex);
@@ -397,6 +456,17 @@ static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
             misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
}

static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
{
    /* Pass policy check before choosing this SA */
    spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

    MLX5_SET(fte_match_param, spec->match_criteria,
             misc_parameters_2.metadata_reg_c_0, reqid);
    MLX5_SET(fte_match_param, spec->match_value,
             misc_parameters_2.metadata_reg_c_0, reqid);
}
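
How the reqid travels between the two TX tables (a summary of the hunks around this point, not normative): the policy rule in tx->ft.pol programs a modify-header action that writes the policy's reqid into metadata register REG_C_0 (the XFRM_DEV_OFFLOAD_OUT case of setup_modify_header() below), and the SA rule installed by tx_add_rule() then matches that register via setup_fte_reg_c0() above. A packet can therefore reach an SA only after passing a policy check that stamped the matching reqid:

    ft.pol hit: set REG_C_0 := reqid  ->  ft.sa: match REG_C_0 == reqid, encrypt
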
static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
                               struct mlx5_flow_act *flow_act)
{
@@ -411,6 +481,11 @@ static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
                 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
        ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
        break;
    case XFRM_DEV_OFFLOAD_OUT:
        MLX5_SET(set_action_in, action, field,
                 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
        ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
        break;
    default:
        return -EINVAL;
    }
@@ -431,9 +506,50 @@ static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
    return 0;
}

static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
                              struct mlx5_accel_esp_xfrm_attrs *attrs,
                              struct mlx5_flow_act *flow_act)
{
    enum mlx5_flow_namespace_type ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
    struct mlx5_pkt_reformat_params reformat_params = {};
    struct mlx5_pkt_reformat *pkt_reformat;
    u8 reformatbf[16] = {};
    __be32 spi;

    if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
        reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
        ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
        goto cmd;
    }

    if (attrs->family == AF_INET)
        reformat_params.type =
            MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
    else
        reformat_params.type =
            MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;

    /* convert to network format */
    spi = htonl(attrs->spi);
    memcpy(reformatbf, &spi, 4);

    reformat_params.param_0 = attrs->authsize;
    reformat_params.size = sizeof(reformatbf);
    reformat_params.data = &reformatbf;

cmd:
    pkt_reformat =
        mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
    if (IS_ERR(pkt_reformat))
        return PTR_ERR(pkt_reformat);

    flow_act->pkt_reformat = pkt_reformat;
    flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
    return 0;
}

static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
    struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
    struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
    struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
    struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
@@ -468,6 +584,16 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
    if (err)
        goto err_mod_header;

    switch (attrs->type) {
    case XFRM_DEV_OFFLOAD_PACKET:
        err = setup_pkt_reformat(mdev, attrs, &flow_act);
        if (err)
            goto err_pkt_reformat;
        break;
    default:
        break;
    }

    flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
    flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
    flow_act.flags |= FLOW_ACT_NO_APPEND;
@@ -483,11 +609,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
    }
    kvfree(spec);

    ipsec_rule->rule = rule;
    ipsec_rule->modify_hdr = flow_act.modify_hdr;
    sa_entry->ipsec_rule.rule = rule;
    sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
    sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
    return 0;

err_add_flow:
    if (flow_act.pkt_reformat)
        mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
    mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
    kvfree(spec);
@@ -501,6 +631,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
    struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
    struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
    struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
    struct mlx5_flow_destination dest = {};
    struct mlx5_flow_act flow_act = {};
    struct mlx5_flow_handle *rule;
    struct mlx5_flow_spec *spec;
@@ -514,7 +645,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
    spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
    if (!spec) {
        err = -ENOMEM;
        goto out;
        goto err_alloc;
    }

    if (attrs->family == AF_INET)
@@ -522,32 +653,305 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
    else
        setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

    setup_fte_spi(spec, attrs->spi);
    setup_fte_esp(spec);
    setup_fte_no_frags(spec);
    setup_fte_reg_a(spec);

    switch (attrs->type) {
    case XFRM_DEV_OFFLOAD_CRYPTO:
        setup_fte_spi(spec, attrs->spi);
        setup_fte_esp(spec);
        setup_fte_reg_a(spec);
        break;
    case XFRM_DEV_OFFLOAD_PACKET:
        setup_fte_reg_c0(spec, attrs->reqid);
        err = setup_pkt_reformat(mdev, attrs, &flow_act);
        if (err)
            goto err_pkt_reformat;
        break;
    default:
        break;
    }

    flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
    flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
    flow_act.flags |= FLOW_ACT_NO_APPEND;
    flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
                      MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
    rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, NULL, 0);
    flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW |
                       MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
                       MLX5_FLOW_CONTEXT_ACTION_COUNT;
    dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
    dest.counter_id = mlx5_fc_id(tx->fc->cnt);
    rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, &dest, 1);
    if (IS_ERR(rule)) {
        err = PTR_ERR(rule);
        mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
        goto out;
        goto err_add_flow;
    }

    sa_entry->ipsec_rule.rule = rule;

out:
    kvfree(spec);
    if (err)
        tx_ft_put(ipsec);
    sa_entry->ipsec_rule.rule = rule;
    sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
    return 0;

err_add_flow:
    if (flow_act.pkt_reformat)
        mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
    kvfree(spec);
err_alloc:
    tx_ft_put(ipsec);
    return err;
}

static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
    struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
    struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
    struct mlx5_flow_destination dest[2] = {};
    struct mlx5_flow_act flow_act = {};
    struct mlx5_flow_handle *rule;
    struct mlx5_flow_spec *spec;
    struct mlx5e_ipsec_tx *tx;
    int err, dstn = 0;

    tx = tx_ft_get(mdev, pol_entry->ipsec);
    if (IS_ERR(tx))
        return PTR_ERR(tx);

    spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
    if (!spec) {
        err = -ENOMEM;
        goto err_alloc;
    }

    if (attrs->family == AF_INET)
        setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
    else
        setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

    setup_fte_no_frags(spec);

    err = setup_modify_header(mdev, attrs->reqid, XFRM_DEV_OFFLOAD_OUT,
                              &flow_act);
    if (err)
        goto err_mod_header;

    switch (attrs->action) {
    case XFRM_POLICY_ALLOW:
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        break;
    case XFRM_POLICY_BLOCK:
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
                           MLX5_FLOW_CONTEXT_ACTION_COUNT;
        dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
        dstn++;
        break;
    default:
        WARN_ON(true);
        err = -EINVAL;
        goto err_action;
    }

    flow_act.flags |= FLOW_ACT_NO_APPEND;
    dest[dstn].ft = tx->ft.sa;
    dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
    dstn++;
    rule = mlx5_add_flow_rules(tx->ft.pol, spec, &flow_act, dest, dstn);
    if (IS_ERR(rule)) {
        err = PTR_ERR(rule);
        mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
        goto err_action;
    }

    kvfree(spec);
    pol_entry->ipsec_rule.rule = rule;
    pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
    return 0;

err_action:
    mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
    kvfree(spec);
err_alloc:
    tx_ft_put(pol_entry->ipsec);
    return err;
}

static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
    struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
    struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
    struct mlx5_flow_destination dest[2];
    struct mlx5_flow_act flow_act = {};
    struct mlx5_flow_handle *rule;
    struct mlx5_flow_spec *spec;
    struct mlx5e_ipsec_rx *rx;
    int err, dstn = 0;

    rx = rx_ft_get(mdev, pol_entry->ipsec, attrs->family);
    if (IS_ERR(rx))
        return PTR_ERR(rx);

    spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
    if (!spec) {
        err = -ENOMEM;
        goto err_alloc;
    }

    if (attrs->family == AF_INET)
        setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
    else
        setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

    setup_fte_no_frags(spec);

    switch (attrs->action) {
    case XFRM_POLICY_ALLOW:
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        break;
    case XFRM_POLICY_BLOCK:
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
        dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
        dstn++;
        break;
    default:
        WARN_ON(true);
        err = -EINVAL;
        goto err_action;
    }

    flow_act.flags |= FLOW_ACT_NO_APPEND;
    dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
    dest[dstn].ft = rx->ft.sa;
    dstn++;
    rule = mlx5_add_flow_rules(rx->ft.pol, spec, &flow_act, dest, dstn);
    if (IS_ERR(rule)) {
        err = PTR_ERR(rule);
        mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
        goto err_action;
    }

    kvfree(spec);
    pol_entry->ipsec_rule.rule = rule;
    return 0;

err_action:
    kvfree(spec);
err_alloc:
    rx_ft_put(mdev, pol_entry->ipsec, attrs->family);
    return err;
}

static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
{
    struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
    struct mlx5_core_dev *mdev = ipsec->mdev;
    struct mlx5e_ipsec_tx *tx = ipsec->tx;

    mlx5_fc_destroy(mdev, tx->fc->drop);
    mlx5_fc_destroy(mdev, tx->fc->cnt);
    kfree(tx->fc);
    mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
    mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
    kfree(rx_ipv4->fc);
}

static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
{
    struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
    struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
    struct mlx5_core_dev *mdev = ipsec->mdev;
    struct mlx5e_ipsec_tx *tx = ipsec->tx;
    struct mlx5e_ipsec_fc *fc;
    struct mlx5_fc *counter;
    int err;

    fc = kzalloc(sizeof(*rx_ipv4->fc), GFP_KERNEL);
    if (!fc)
        return -ENOMEM;

    /* Both IPv4 and IPv6 point to same flow counters struct. */
    rx_ipv4->fc = fc;
    rx_ipv6->fc = fc;
    counter = mlx5_fc_create(mdev, false);
    if (IS_ERR(counter)) {
        err = PTR_ERR(counter);
        goto err_rx_cnt;
    }

    fc->cnt = counter;
    counter = mlx5_fc_create(mdev, false);
    if (IS_ERR(counter)) {
        err = PTR_ERR(counter);
        goto err_rx_drop;
    }

    fc->drop = counter;
    fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
    if (!fc) {
        err = -ENOMEM;
        goto err_tx_fc;
    }

    tx->fc = fc;
    counter = mlx5_fc_create(mdev, false);
    if (IS_ERR(counter)) {
        err = PTR_ERR(counter);
        goto err_tx_cnt;
    }

    fc->cnt = counter;
    counter = mlx5_fc_create(mdev, false);
    if (IS_ERR(counter)) {
        err = PTR_ERR(counter);
        goto err_tx_drop;
    }

    fc->drop = counter;
    return 0;

err_tx_drop:
    mlx5_fc_destroy(mdev, tx->fc->cnt);
err_tx_cnt:
    kfree(tx->fc);
err_tx_fc:
    mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
err_rx_drop:
    mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
err_rx_cnt:
    kfree(rx_ipv4->fc);
    return err;
}

void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
{
    struct mlx5_core_dev *mdev = priv->mdev;
    struct mlx5e_ipsec *ipsec = priv->ipsec;
    struct mlx5e_ipsec_hw_stats *stats;
    struct mlx5e_ipsec_fc *fc;

    stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;

    stats->ipsec_rx_pkts = 0;
    stats->ipsec_rx_bytes = 0;
    stats->ipsec_rx_drop_pkts = 0;
    stats->ipsec_rx_drop_bytes = 0;
    stats->ipsec_tx_pkts = 0;
    stats->ipsec_tx_bytes = 0;
    stats->ipsec_tx_drop_pkts = 0;
    stats->ipsec_tx_drop_bytes = 0;

    fc = ipsec->rx_ipv4->fc;
    mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
    mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
                  &stats->ipsec_rx_drop_bytes);

    fc = ipsec->tx->fc;
    mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
    mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
                  &stats->ipsec_tx_drop_bytes);
}

int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
    if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
@@ -563,6 +967,9 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)

    mlx5_del_flow_rules(ipsec_rule->rule);

    if (ipsec_rule->pkt_reformat)
        mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);

    if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
        tx_ft_put(sa_entry->ipsec);
        return;
@@ -572,11 +979,36 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
    rx_ft_put(mdev, sa_entry->ipsec, sa_entry->attrs.family);
}

int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
    if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
        return tx_add_policy(pol_entry);

    return rx_add_policy(pol_entry);
}

void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
    struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
    struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);

    mlx5_del_flow_rules(ipsec_rule->rule);

    if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
        rx_ft_put(mdev, pol_entry->ipsec, pol_entry->attrs.family);
        return;
    }

    mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
    tx_ft_put(pol_entry->ipsec);
}

void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
    if (!ipsec->tx)
        return;

    ipsec_fs_destroy_counters(ipsec);
    mutex_destroy(&ipsec->tx->ft.mutex);
    WARN_ON(ipsec->tx->ft.refcnt);
    kfree(ipsec->tx);
@@ -612,6 +1044,10 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
    if (!ipsec->rx_ipv6)
        goto err_rx_ipv6;

    err = ipsec_fs_init_counters(ipsec);
    if (err)
        goto err_counters;

    mutex_init(&ipsec->tx->ft.mutex);
    mutex_init(&ipsec->rx_ipv4->ft.mutex);
    mutex_init(&ipsec->rx_ipv6->ft.mutex);
@@ -619,6 +1055,8 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)

    return 0;

err_counters:
    kfree(ipsec->rx_ipv6);
err_rx_ipv6:
    kfree(ipsec->rx_ipv4);
err_rx_ipv4:

@@ -6,6 +6,10 @@
#include "ipsec.h"
#include "lib/mlx5.h"

enum {
    MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};

u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
    u32 caps = 0;
@@ -83,6 +87,20 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
    MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
    if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
        MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);

    if (attrs->hard_packet_limit != XFRM_INF) {
        MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
                 lower_32_bits(attrs->hard_packet_limit));
        MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
        MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
    }

    if (attrs->soft_packet_limit != XFRM_INF) {
        MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
                 lower_32_bits(attrs->soft_packet_limit));

        MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
    }
}

static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -246,6 +264,113 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
    memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}

static void
mlx5e_ipsec_aso_update_esn(struct mlx5e_ipsec_sa_entry *sa_entry,
                           const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
    struct mlx5_wqe_aso_ctrl_seg data = {};

    data.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
    data.condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE
                                 << 4;
    data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
    data.bitwise_data = cpu_to_be64(BIT_ULL(54));
    data.data_mask = data.bitwise_data;

    mlx5e_ipsec_aso_query(sa_entry, &data);
}

static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
                                         u32 mode_param)
{
    struct mlx5_accel_esp_xfrm_attrs attrs = {};

    if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
        sa_entry->esn_state.esn++;
        sa_entry->esn_state.overlap = 0;
    } else {
        sa_entry->esn_state.overlap = 1;
    }

    mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
    mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
    mlx5e_ipsec_aso_update_esn(sa_entry, &attrs);
}

static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
    struct mlx5e_ipsec_work *work =
        container_of(_work, struct mlx5e_ipsec_work, work);
    struct mlx5_accel_esp_xfrm_attrs *attrs;
    struct mlx5e_ipsec_sa_entry *sa_entry;
    struct mlx5e_ipsec_aso *aso;
    struct mlx5e_ipsec *ipsec;
    int ret;

    sa_entry = xa_load(&work->ipsec->sadb, work->id);
    if (!sa_entry)
        goto out;

    ipsec = sa_entry->ipsec;
    aso = ipsec->aso;
    attrs = &sa_entry->attrs;

    spin_lock(&sa_entry->x->lock);
    ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
    if (ret)
        goto unlock;

    aso->use_cache = true;
    if (attrs->esn_trigger &&
        !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
        u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);

        mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
    }

    if (attrs->soft_packet_limit != XFRM_INF)
        if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
            !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
            !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
            xfrm_state_check_expire(sa_entry->x);
    aso->use_cache = false;

unlock:
    spin_unlock(&sa_entry->x->lock);
out:
    kfree(work);
}
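
The notifier below runs in EQ (atomic) context, so it only captures the object ID and defers the sleeping ASO query to the ordered workqueue. A minimal sketch of that deferral pattern (hypothetical names):

    static int my_notifier_cb(struct notifier_block *nb, unsigned long event,
                              void *data)
    {
        struct my_work *w = kmalloc(sizeof(*w), GFP_ATOMIC); /* no sleeping here */

        if (!w)
            return NOTIFY_DONE;

        INIT_WORK(&w->work, my_handler); /* my_handler may sleep */
        queue_work(my_wq, &w->work);
        return NOTIFY_OK;
    }
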
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
                             void *data)
{
    struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
    struct mlx5_eqe_obj_change *object;
    struct mlx5e_ipsec_work *work;
    struct mlx5_eqe *eqe = data;
    u16 type;

    if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
        return NOTIFY_DONE;

    object = &eqe->data.obj_change;
    type = be16_to_cpu(object->obj_type);

    if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
        return NOTIFY_DONE;

    work = kmalloc(sizeof(*work), GFP_ATOMIC);
    if (!work)
        return NOTIFY_DONE;

    INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
    work->ipsec = ipsec;
    work->id = be32_to_cpu(object->obj_id);

    queue_work(ipsec->wq, &work->work);
    return NOTIFY_OK;
}

int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
    struct mlx5_core_dev *mdev = ipsec->mdev;
@@ -273,6 +398,9 @@ int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
        goto err_aso_create;
    }

    ipsec->nb.notifier_call = mlx5e_ipsec_event;
    mlx5_notifier_register(mdev, &ipsec->nb);

    ipsec->aso = aso;
    return 0;

@@ -293,8 +421,76 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
    aso = ipsec->aso;
    pdev = mlx5_core_dma_dev(mdev);

    mlx5_notifier_unregister(mdev, &ipsec->nb);
    mlx5_aso_destroy(aso->aso);
    dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
                     DMA_BIDIRECTIONAL);
    kfree(aso);
}

static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
                                 struct mlx5_wqe_aso_ctrl_seg *data)
{
    if (!data)
        return;

    ctrl->data_mask_mode = data->data_mask_mode;
    ctrl->condition_1_0_operand = data->condition_1_0_operand;
    ctrl->condition_1_0_offset = data->condition_1_0_offset;
    ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
    ctrl->condition_0_data = data->condition_0_data;
    ctrl->condition_0_mask = data->condition_0_mask;
    ctrl->condition_1_data = data->condition_1_data;
    ctrl->condition_1_mask = data->condition_1_mask;
    ctrl->bitwise_data = data->bitwise_data;
    ctrl->data_mask = data->data_mask;
}

int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
                          struct mlx5_wqe_aso_ctrl_seg *data)
{
    struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
    struct mlx5e_ipsec_aso *aso = ipsec->aso;
    struct mlx5_core_dev *mdev = ipsec->mdev;
    struct mlx5_wqe_aso_ctrl_seg *ctrl;
    struct mlx5e_hw_objs *res;
    struct mlx5_aso_wqe *wqe;
    u8 ds_cnt;

    lockdep_assert_held(&sa_entry->x->lock);
    if (aso->use_cache)
        return 0;

    res = &mdev->mlx5e_res.hw_objs;

    memset(aso->ctx, 0, sizeof(aso->ctx));
    wqe = mlx5_aso_get_wqe(aso->aso);
    ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
    mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
                       MLX5_ACCESS_ASO_OPC_MOD_IPSEC);

    ctrl = &wqe->aso_ctrl;
    ctrl->va_l =
        cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
    ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
    ctrl->l_key = cpu_to_be32(res->mkey);
    mlx5e_ipsec_aso_copy(ctrl, data);

    mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
    return mlx5_aso_poll_cq(aso->aso, false);
}

void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
                                   u64 *packets)
{
    struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
    struct mlx5e_ipsec_aso *aso = ipsec->aso;
    u64 hard_cnt;

    hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
    /* HW decreases the limit till it reaches zero to fire an event.
     * We need to fix the calculations, so the returned count is the
     * total number of passed packets and not how many are left.
     */
    *packets = sa_entry->attrs.hard_packet_limit - hard_cnt;
}
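
A worked example of the correction in the comment above (hypothetical numbers): if the SA was armed with hard_packet_limit = 1000 and the readback now shows remove_flow_pkt_cnt = 400 packets remaining, then

    curlft.packets = 1000 - 400 = 600

packets have actually passed, which is the total the XFRM stack expects.
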
@@ -312,27 +312,31 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
                                       struct mlx5_cqe64 *cqe)
{
    u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
    struct mlx5e_priv *priv;
    struct mlx5e_priv *priv = netdev_priv(netdev);
    struct mlx5e_ipsec *ipsec = priv->ipsec;
    struct mlx5e_ipsec_sa_entry *sa_entry;
    struct xfrm_offload *xo;
    struct xfrm_state *xs;
    struct sec_path *sp;
    u32 sa_handle;

    sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
    priv = netdev_priv(netdev);
    sp = secpath_set(skb);
    if (unlikely(!sp)) {
        atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
        atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
        return;
    }

    xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
    if (unlikely(!xs)) {
        atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
    rcu_read_lock();
    sa_entry = xa_load(&ipsec->sadb, sa_handle);
    if (unlikely(!sa_entry)) {
        rcu_read_unlock();
        atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
        return;
    }
    xfrm_state_hold(sa_entry->x);
    rcu_read_unlock();

    sp->xvec[sp->len++] = xs;
    sp->xvec[sp->len++] = sa_entry->x;
    sp->olen++;

    xo = xfrm_offload(skb);
@@ -349,6 +353,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
        xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
        break;
    default:
        atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
        atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
    }
}
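
Why the hold-under-RCU sequence above matters (a reading of the new lookup, not normative): xa_load() returns a pointer that is only guaranteed to stay valid inside the RCU read section, so the state's refcount must be taken before rcu_read_unlock():

    rcu_read_lock();
    sa_entry = xa_load(&ipsec->sadb, sa_handle);
    if (sa_entry)
        xfrm_state_hold(sa_entry->x); /* keep the state alive past RCU */
    rcu_read_unlock();
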
@@ -37,6 +37,17 @@
#include "en.h"
#include "ipsec.h"

static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
    { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_pkts) },
    { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) },
    { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) },
    { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) },
    { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) },
    { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) },
    { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) },
    { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_bytes) },
};

static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
    { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
    { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
@@ -50,8 +61,48 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
    atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))

#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
{
    if (!priv->ipsec)
        return 0;

    return NUM_IPSEC_HW_COUNTERS;
}

static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw) {}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
{
    unsigned int i;

    if (!priv->ipsec)
        return idx;

    for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
        strcpy(data + (idx++) * ETH_GSTRING_LEN,
               mlx5e_ipsec_hw_stats_desc[i].format);

    return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
{
    int i;

    if (!priv->ipsec)
        return idx;

    mlx5e_accel_ipsec_fs_read_stats(priv, &priv->ipsec->hw_stats);
    for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
        data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
                                              mlx5e_ipsec_hw_stats_desc, i);

    return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
{
    return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
@@ -81,4 +132,5 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
    return idx;
}

MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);

@@ -2480,6 +2480,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
    &MLX5E_STATS_GRP(per_prio),
    &MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
    &MLX5E_STATS_GRP(ipsec_hw),
    &MLX5E_STATS_GRP(ipsec_sw),
#endif
    &MLX5E_STATS_GRP(tls),

@@ -490,6 +490,7 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
extern MLX5E_DECLARE_STATS_GRP(macsec_hw);

@@ -19,6 +19,7 @@
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
#include "devlink.h"
#include "en_accel/ipsec.h"

enum {
    MLX5_EQE_OWNER_INIT_VAL = 0x1,
@@ -578,6 +579,10 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
    if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
        async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

    if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
        async_event_mask |=
            (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);

    mask[0] = async_event_mask;

    if (MLX5_CAP_GEN(dev, event_cap))

@@ -111,8 +111,8 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 7
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy */
#define KERNEL_NIC_PRIO_NUM_LEVELS 8
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
@@ -133,7 +133,7 @@
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)

#define KERNEL_TX_IPSEC_NUM_PRIOS 1
#define KERNEL_TX_IPSEC_NUM_LEVELS 1
#define KERNEL_TX_IPSEC_NUM_LEVELS 2
#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)

#define KERNEL_TX_MACSEC_NUM_PRIOS 1

@@ -15,6 +15,7 @@
#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
#define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))

#define ASO_CTRL_READ_EN BIT(0)
struct mlx5_wqe_aso_ctrl_seg {
    __be32 va_h;
    __be32 va_l; /* include read_enable */