Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-23 20:24:12 +08:00)
Merge branch 'mlx5-fixes-2023-11-13-manual'
Saeed Mahameed says:

====================
This series provides bug fixes to mlx5 driver.
====================

Link: https://lore.kernel.org/r/20231114215846.5902-1-saeed@kernel.org/
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit bdc454fcdc
@@ -177,6 +177,8 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
 
 static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
 				    struct mlx5_cqe64 *cqe,
+				    u8 *md_buff,
+				    u8 *md_buff_sz,
 				    int budget)
 {
 	struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
@@ -211,19 +213,24 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
 	mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
 out:
 	napi_consume_skb(skb, budget);
-	mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id);
+	md_buff[(*md_buff_sz)++] = metadata_id;
 	if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
 	    !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
 		queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
 }
 
-static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
+static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)
 {
 	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
-	struct mlx5_cqwq *cqwq = &cq->wq;
+	int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET);
+	u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET];
+	u8 metadata_buff_sz = 0;
+	struct mlx5_cqwq *cqwq;
 	struct mlx5_cqe64 *cqe;
 	int work_done = 0;
 
+	cqwq = &cq->wq;
+
 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
 		return false;
 
@@ -234,7 +241,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
 	do {
 		mlx5_cqwq_pop(cqwq);
 
-		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
+		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
+					metadata_buff, &metadata_buff_sz, napi_budget);
 	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
 
 	mlx5_cqwq_update_db_record(cqwq);
@@ -242,6 +250,10 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
 	/* ensure cq space is freed before enabling more cqes */
 	wmb();
 
+	while (metadata_buff_sz > 0)
+		mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist,
+					     metadata_buff[--metadata_buff_sz]);
+
 	mlx5e_txqsq_wake(&ptpsq->txqsq);
 
 	return work_done == budget;
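Note on the hunks above (the PTP timestamping CQ path): the poll loop now collects timestamp-CQE metadata IDs in a stack buffer and returns them to the free list only after the CQ doorbell record is updated, instead of pushing each ID back immediately. A minimal user-space sketch of that deferred-release ordering follows; every name in it is invented for illustration and none of it is driver code.

	#include <stdio.h>
	#include <stdint.h>

	#define POLL_BUDGET 8

	static uint8_t freelist[POLL_BUDGET];
	static int freelist_sz;

	static void freelist_push(uint8_t id)
	{
		freelist[freelist_sz++] = id;
	}

	static void ring_doorbell(void)
	{
		/* stand-in for the doorbell update + write barrier step */
		printf("doorbell updated\n");
	}

	int main(void)
	{
		uint8_t pending[POLL_BUDGET];
		uint8_t pending_sz = 0;

		/* completion loop: collect IDs locally instead of handing them
		 * back one by one while completions are still being processed */
		for (uint8_t id = 0; id < 4; id++)
			pending[pending_sz++] = id;

		ring_doorbell();

		/* only now return the IDs to the shared free list */
		while (pending_sz > 0)
			freelist_push(pending[--pending_sz]);

		printf("%d ids returned\n", freelist_sz);
		return 0;
	}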
@@ -492,11 +492,11 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
 
 void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
 {
+	char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
 	char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
 	struct mlx5e_icosq *icosq = rq->icosq;
 	struct mlx5e_priv *priv = rq->priv;
 	struct mlx5e_err_ctx err_ctx = {};
-	char icosq_str[32] = {};
 
 	err_ctx.ctx = rq;
 	err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
@@ -505,7 +505,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
 	if (icosq)
 		snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
 	snprintf(err_str, sizeof(err_str),
-		 "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x",
+		 "RX timeout on channel: %d, %s RQ: 0x%x, CQ: 0x%x",
		 rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
 
 	mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
@@ -300,9 +300,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 	if (err)
 		goto destroy_neigh_entry;
 
-	e->encap_size = ipv4_encap_size;
-	e->encap_header = encap_header;
-
 	if (!(nud_state & NUD_VALID)) {
 		neigh_event_send(attr.n, NULL);
 		/* the encap entry will be made valid on neigh update event
@@ -322,6 +319,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 		goto destroy_neigh_entry;
 	}
 
+	e->encap_size = ipv4_encap_size;
+	e->encap_header = encap_header;
 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
 	mlx5e_route_lookup_ipv4_put(&attr);
@@ -404,16 +403,12 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
 	if (err)
 		goto free_encap;
 
-	e->encap_size = ipv4_encap_size;
-	kfree(e->encap_header);
-	e->encap_header = encap_header;
-
 	if (!(nud_state & NUD_VALID)) {
 		neigh_event_send(attr.n, NULL);
 		/* the encap entry will be made valid on neigh update event
 		 * and not used before that.
 		 */
-		goto release_neigh;
+		goto free_encap;
 	}
 
 	memset(&reformat_params, 0, sizeof(reformat_params));
@@ -427,6 +422,10 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
 		goto free_encap;
 	}
 
+	e->encap_size = ipv4_encap_size;
+	kfree(e->encap_header);
+	e->encap_header = encap_header;
+
 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
 	mlx5e_route_lookup_ipv4_put(&attr);
@@ -568,9 +567,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 	if (err)
 		goto destroy_neigh_entry;
 
-	e->encap_size = ipv6_encap_size;
-	e->encap_header = encap_header;
-
 	if (!(nud_state & NUD_VALID)) {
 		neigh_event_send(attr.n, NULL);
 		/* the encap entry will be made valid on neigh update event
@@ -590,6 +586,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 		goto destroy_neigh_entry;
 	}
 
+	e->encap_size = ipv6_encap_size;
+	e->encap_header = encap_header;
 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
 	mlx5e_route_lookup_ipv6_put(&attr);
@@ -671,16 +669,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
 	if (err)
 		goto free_encap;
 
-	e->encap_size = ipv6_encap_size;
-	kfree(e->encap_header);
-	e->encap_header = encap_header;
-
 	if (!(nud_state & NUD_VALID)) {
 		neigh_event_send(attr.n, NULL);
 		/* the encap entry will be made valid on neigh update event
 		 * and not used before that.
 		 */
-		goto release_neigh;
+		goto free_encap;
 	}
 
 	memset(&reformat_params, 0, sizeof(reformat_params));
@@ -694,6 +688,10 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
 		goto free_encap;
 	}
 
+	e->encap_size = ipv6_encap_size;
+	kfree(e->encap_header);
+	e->encap_header = encap_header;
+
 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
 	mlx5e_route_lookup_ipv6_put(&attr);
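The four tunnel-encap functions above all get the same treatment: the entry's encap_header/encap_size are committed only after the last step that can still fail, and the update paths unwind with free_encap instead of release_neigh, so an error path can no longer free a buffer the entry already points to. A hedged stand-alone sketch of this "publish only after the last failure point" pattern; the structures and helper below are invented for the example.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct entry {
		char *header;
		size_t header_size;
	};

	/* Build a new header and only commit it to *e once nothing can fail. */
	static int entry_update_header(struct entry *e, const char *data, size_t len)
	{
		char *header = malloc(len);

		if (!header)
			return -1;
		memcpy(header, data, len);

		/* imagine further steps here that may fail and free 'header';
		 * since *e is untouched, their error paths cannot double free */

		/* success: now it is safe to replace the old header */
		free(e->header);
		e->header = header;
		e->header_size = len;
		return 0;
	}

	int main(void)
	{
		struct entry e = { 0 };

		entry_update_header(&e, "abc", 3);
		printf("%zu\n", e.header_size);
		free(e.header);
		return 0;
	}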
@@ -43,12 +43,17 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
 			       struct ethtool_drvinfo *drvinfo)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
+	int count;
 
 	strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-		 "%d.%d.%04d (%.16s)",
-		 fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
-		 mdev->board_id);
+	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
+			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
+	if (count == sizeof(drvinfo->fw_version))
+		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+			 "%d.%d.%04d", fw_rev_maj(mdev),
+			 fw_rev_min(mdev), fw_rev_sub(mdev));
+
 	strscpy(drvinfo->bus_info, dev_name(mdev->device),
 		sizeof(drvinfo->bus_info));
 }
@@ -71,13 +71,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5_core_dev *mdev = priv->mdev;
+	int count;
 
 	strscpy(drvinfo->driver, mlx5e_rep_driver_name,
 		sizeof(drvinfo->driver));
-	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-		 "%d.%d.%04d (%.16s)",
-		 fw_rev_maj(mdev), fw_rev_min(mdev),
-		 fw_rev_sub(mdev), mdev->board_id);
+	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
+			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
+	if (count == sizeof(drvinfo->fw_version))
+		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+			 "%d.%d.%04d", fw_rev_maj(mdev),
+			 fw_rev_min(mdev), fw_rev_sub(mdev));
 }
 
 static const struct counter_desc sw_rep_stats_desc[] = {
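Both get_drvinfo hunks use the same idiom: capture snprintf's return value and, if the output filled the whole fw_version buffer, rewrite it with a shorter format that drops the board id. A small stand-alone illustration of the truncation check; the buffer size and values are made up, and in user space the usual test is count >= sizeof(buf).

	#include <stdio.h>

	int main(void)
	{
		char fw_version[16];	/* deliberately small to force truncation */
		const char *board = "MT_0000000123456";
		int maj = 22, min = 39, sub = 1002;
		int count;

		count = snprintf(fw_version, sizeof(fw_version),
				 "%d.%d.%04d (%.16s)", maj, min, sub, board);
		if (count >= (int)sizeof(fw_version))	/* output was truncated */
			snprintf(fw_version, sizeof(fw_version),
				 "%d.%d.%04d", maj, min, sub);

		printf("%s\n", fw_version);
		return 0;
	}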
@@ -3147,7 +3147,7 @@ static struct mlx5_fields fields[] = {
 	OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
 		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
 	OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
-	OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
+	OFFLOAD(IP_DSCP, 16, 0x0fc0, ip6, 0, ip_dscp),
 
 	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
 	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
@@ -3158,21 +3158,31 @@ static struct mlx5_fields fields[] = {
 	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
 };
 
-static unsigned long mask_to_le(unsigned long mask, int size)
+static u32 mask_field_get(void *mask, struct mlx5_fields *f)
 {
-	__be32 mask_be32;
-	__be16 mask_be16;
-
-	if (size == 32) {
-		mask_be32 = (__force __be32)(mask);
-		mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
-	} else if (size == 16) {
-		mask_be32 = (__force __be32)(mask);
-		mask_be16 = *(__be16 *)&mask_be32;
-		mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
+	switch (f->field_bsize) {
+	case 32:
+		return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
+	case 16:
+		return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
+	default:
+		return *(u8 *)mask & (u8)f->field_mask;
 	}
+}
 
-	return mask;
+static void mask_field_clear(void *mask, struct mlx5_fields *f)
+{
+	switch (f->field_bsize) {
+	case 32:
+		*(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
+		break;
+	case 16:
+		*(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
+		break;
+	default:
+		*(u8 *)mask &= ~(u8)f->field_mask;
+		break;
+	}
 }
 
 static int offload_pedit_fields(struct mlx5e_priv *priv,
@@ -3184,11 +3194,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
 	struct pedit_headers_action *hdrs = parse_attr->hdrs;
 	void *headers_c, *headers_v, *action, *vals_p;
-	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
 	struct mlx5e_tc_mod_hdr_acts *mod_acts;
-	unsigned long mask, field_mask;
+	void *s_masks_p, *a_masks_p;
 	int i, first, last, next_z;
 	struct mlx5_fields *f;
+	unsigned long mask;
+	u32 s_mask, a_mask;
 	u8 cmd;
 
 	mod_acts = &parse_attr->mod_hdr_acts;
@@ -3204,15 +3215,11 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 		bool skip;
 
 		f = &fields[i];
-		/* avoid seeing bits set from previous iterations */
-		s_mask = 0;
-		a_mask = 0;
-
 		s_masks_p = (void *)set_masks + f->offset;
 		a_masks_p = (void *)add_masks + f->offset;
 
-		s_mask = *s_masks_p & f->field_mask;
-		a_mask = *a_masks_p & f->field_mask;
+		s_mask = mask_field_get(s_masks_p, f);
+		a_mask = mask_field_get(a_masks_p, f);
 
 		if (!s_mask && !a_mask) /* nothing to offload here */
 			continue;
@@ -3239,22 +3246,20 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 					 match_mask, f->field_bsize))
 				skip = true;
 			/* clear to denote we consumed this field */
-			*s_masks_p &= ~f->field_mask;
+			mask_field_clear(s_masks_p, f);
 		} else {
 			cmd = MLX5_ACTION_TYPE_ADD;
 			mask = a_mask;
 			vals_p = (void *)add_vals + f->offset;
 			/* add 0 is no change */
-			if ((*(u32 *)vals_p & f->field_mask) == 0)
+			if (!mask_field_get(vals_p, f))
 				skip = true;
 			/* clear to denote we consumed this field */
-			*a_masks_p &= ~f->field_mask;
+			mask_field_clear(a_masks_p, f);
 		}
 		if (skip)
 			continue;
 
-		mask = mask_to_le(mask, f->field_bsize);
-
 		first = find_first_bit(&mask, f->field_bsize);
 		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
 		last = find_last_bit(&mask, f->field_bsize);
@@ -3281,10 +3286,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 		MLX5_SET(set_action_in, action, field, f->field);
 
 		if (cmd == MLX5_ACTION_TYPE_SET) {
+			unsigned long field_mask = f->field_mask;
 			int start;
 
-			field_mask = mask_to_le(f->field_mask, f->field_bsize);
-
 			/* if field is bit sized it can start not from first bit */
 			start = find_first_bit(&field_mask, f->field_bsize);
 
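The new mask_field_get()/mask_field_clear() helpers above access the pedit masks in their on-the-wire big-endian layout, sized by field_bsize, instead of round-tripping through intermediate conversions. A rough user-space sketch of the same access pattern, assuming POSIX byte-order helpers; the function name, sizes and values are invented.

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>	/* ntohl/ntohs/htonl */

	/* Read a 32-, 16- or 8-bit big-endian field into host order, masked. */
	static uint32_t mask_field_get(const void *p, int bsize, uint32_t field_mask)
	{
		switch (bsize) {
		case 32:
			return ntohl(*(const uint32_t *)p) & field_mask;
		case 16:
			return ntohs(*(const uint16_t *)p) & (uint16_t)field_mask;
		default:
			return *(const uint8_t *)p & (uint8_t)field_mask;
		}
	}

	int main(void)
	{
		uint32_t be_word = htonl(0x12345678);	/* as stored in a header copy */

		/* prints 0x5678: low 16 bits of the field, in host order */
		printf("0x%x\n", mask_field_get(&be_word, 32, 0x0000ffff));
		return 0;
	}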
@@ -399,9 +399,9 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
 
 		mlx5e_skb_cb_hwtstamp_init(skb);
-		mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
 		mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
 					   metadata_index);
+		mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
 		if (!netif_tx_queue_stopped(sq->txq) &&
 		    mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
 			netif_tx_stop_queue(sq->txq);
@@ -494,10 +494,10 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 
 err_drop:
 	stats->dropped++;
-	dev_kfree_skb_any(skb);
 	if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
 		mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
 					     be32_to_cpu(eseg->flow_table_metadata));
+	dev_kfree_skb_any(skb);
 	mlx5e_tx_flush(sq);
 }
 
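The err_drop hunk is purely an ordering fix: everything that still needs the skb (the timestamp flag and the flow-table metadata) is read before dev_kfree_skb_any() releases it. A generic sketch of the rule with invented structures:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	struct packet {
		uint32_t metadata;
		/* ... payload ... */
	};

	static void recycle_metadata(uint32_t md)
	{
		printf("recycled metadata %u\n", md);
	}

	static void drop_packet(struct packet *pkt, int has_metadata)
	{
		/* read everything we still need from *pkt ... */
		if (has_metadata)
			recycle_metadata(pkt->metadata);

		/* ... and only then release it */
		free(pkt);
	}

	int main(void)
	{
		struct packet *pkt = malloc(sizeof(*pkt));

		if (!pkt)
			return 1;
		pkt->metadata = 42;
		drop_packet(pkt, 1);
		return 0;
	}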
@@ -885,11 +885,14 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
 	struct mlx5_irq *irq;
+	int cpu;
 
 	irq = xa_load(&table->comp_irqs, vecidx);
 	if (!irq)
 		return;
 
+	cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
+	cpumask_clear_cpu(cpu, &table->used_cpus);
 	xa_erase(&table->comp_irqs, vecidx);
 	mlx5_irq_affinity_irq_release(dev, irq);
 }
@@ -897,16 +900,26 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
 static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
+	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+	struct irq_affinity_desc af_desc = {};
 	struct mlx5_irq *irq;
 
-	irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
-	if (IS_ERR(irq)) {
-		/* In case SF irq pool does not exist, fallback to the PF irqs*/
-		if (PTR_ERR(irq) == -ENOENT)
-			return comp_irq_request_pci(dev, vecidx);
+	/* In case SF irq pool does not exist, fallback to the PF irqs*/
+	if (!mlx5_irq_pool_is_sf_pool(pool))
+		return comp_irq_request_pci(dev, vecidx);
 
-		return PTR_ERR(irq);
-	}
+	af_desc.is_managed = 1;
+	cpumask_copy(&af_desc.mask, cpu_online_mask);
+	cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
+	irq = mlx5_irq_affinity_request(pool, &af_desc);
+	if (IS_ERR(irq))
+		return PTR_ERR(irq);
+
+	cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
+	mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
+		      pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
+		      cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
+		      mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
 
 	return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
 }
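comp_irq_request_sf() now open-codes the SF IRQ request: fall back to the PCI path if there is no SF pool, build an affinity mask that excludes already-used CPUs, and record the CPUs of the returned IRQ in table->used_cpus (released again in comp_irq_release_sf). A toy stand-alone sketch of that used-CPU bookkeeping; the bitmask width and names are invented.

	#include <stdio.h>
	#include <stdint.h>

	#define NCPUS 8

	/* Pick the first online CPU not already in *used, and mark it used. */
	static int pick_cpu(uint8_t online, uint8_t *used)
	{
		uint8_t candidates = online & ~*used;

		for (int cpu = 0; cpu < NCPUS; cpu++) {
			if (candidates & (1u << cpu)) {
				*used |= 1u << cpu;
				return cpu;
			}
		}
		return -1;	/* nothing left; caller falls back to a shared pool */
	}

	int main(void)
	{
		uint8_t online = 0x0f;	/* CPUs 0-3 online */
		uint8_t used = 0;

		for (int i = 0; i < 5; i++)
			printf("request %d -> cpu %d\n", i, pick_cpu(online, &used));
		return 0;
	}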
@@ -984,7 +984,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
 		dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 
-	if (rep->vport == MLX5_VPORT_UPLINK && on_esw->offloads.ft_ipsec_tx_pol) {
+	if (rep->vport == MLX5_VPORT_UPLINK &&
+	    on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) {
 		dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
 		flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
 		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -168,45 +168,3 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
 	if (pool->irqs_per_cpu)
 		cpu_put(pool, cpu);
 }
-
-/**
- * mlx5_irq_affinity_irq_request_auto - request one IRQ for mlx5 device.
- * @dev: mlx5 device that is requesting the IRQ.
- * @used_cpus: cpumask of bounded cpus by the device
- * @vecidx: vector index to request an IRQ for.
- *
- * Each IRQ is bounded to at most 1 CPU.
- * This function is requesting an IRQ according to the default assignment.
- * The default assignment policy is:
- * - request the least loaded IRQ which is not bound to any
- *   CPU of the previous IRQs requested.
- *
- * On success, this function updates used_cpus mask and returns an irq pointer.
- * In case of an error, an appropriate error pointer is returned.
- */
-struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
-						    struct cpumask *used_cpus, u16 vecidx)
-{
-	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
-	struct irq_affinity_desc af_desc = {};
-	struct mlx5_irq *irq;
-
-	if (!mlx5_irq_pool_is_sf_pool(pool))
-		return ERR_PTR(-ENOENT);
-
-	af_desc.is_managed = 1;
-	cpumask_copy(&af_desc.mask, cpu_online_mask);
-	cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus);
-	irq = mlx5_irq_affinity_request(pool, &af_desc);
-
-	if (IS_ERR(irq))
-		return irq;
-
-	cpumask_or(used_cpus, used_cpus, mlx5_irq_get_affinity_mask(irq));
-	mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
-		      pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
-		      cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
-		      mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
-
-	return irq;
-}
@@ -384,7 +384,12 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 
 static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
 {
-	return mlx5_ptp_adjtime(ptp, delta);
+	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+	struct mlx5_core_dev *mdev;
+
+	mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+	return mlx5_ptp_adjtime_real_time(mdev, delta);
 }
 
 static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
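mlx5_ptp_adjphase() now recovers the owning device from the embedded clock via container_of() and adjusts the real-time clock directly instead of reusing .adjtime. A stand-alone sketch of the container_of pattern used here; the structures below are invented for the example.

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct clock {
		int id;
	};

	struct device {
		const char *name;
		struct clock clock;	/* embedded member */
	};

	static void adjust_phase(struct clock *clk, int delta)
	{
		/* walk back from the embedded member to the containing device */
		struct device *dev = container_of(clk, struct device, clock);

		printf("adjusting phase of %s by %d\n", dev->name, delta);
	}

	int main(void)
	{
		struct device dev = { .name = "dev0", .clock = { .id = 1 } };

		adjust_phase(&dev.clock, 100);
		return 0;
	}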
@@ -28,7 +28,7 @@
 struct mlx5_irq {
 	struct atomic_notifier_head nh;
 	cpumask_var_t mask;
-	char name[MLX5_MAX_IRQ_NAME];
+	char name[MLX5_MAX_IRQ_FORMATTED_NAME];
 	struct mlx5_irq_pool *pool;
 	int refcount;
 	struct msi_map map;
@@ -292,8 +292,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	else
 		irq_sf_set_name(pool, name, i);
 	ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
-	snprintf(irq->name, MLX5_MAX_IRQ_NAME,
-		 "%s@pci:%s", name, pci_name(dev->pdev));
+	snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME,
+		 MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev));
 	err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
 			  &irq->nh);
 	if (err) {
@@ -7,6 +7,9 @@
 #include <linux/mlx5/driver.h>
 
 #define MLX5_MAX_IRQ_NAME (32)
+#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s")
+#define MLX5_MAX_IRQ_FORMATTED_NAME \
+	(MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR))
 /* max irq_index is 2047, so four chars */
 #define MLX5_MAX_IRQ_IDX_CHARS (4)
 #define MLX5_EQ_REFS_PER_IRQ (2)
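The header change sizes the formatted IRQ name as the base-name budget plus sizeof() of the format-string literal, and the mlx5_irq_alloc() hunk above switches its snprintf to those macros. A small illustration of deriving a buffer size from the format string; the constants and names below are invented and this is only a sketch of the sizing idea.

	#include <stdio.h>

	#define MAX_BASE_NAME		32
	#define NAME_FORMAT_STR		"%s@pci:%s"
	/* sizeof(NAME_FORMAT_STR) counts the literal characters plus the NUL,
	 * giving headroom for the literal "@pci:" part of the formatted name */
	#define MAX_FORMATTED_NAME	(MAX_BASE_NAME + sizeof(NAME_FORMAT_STR))

	int main(void)
	{
		char name[MAX_FORMATTED_NAME];

		snprintf(name, sizeof(name), NAME_FORMAT_STR,
			 "mlx5_comp13", "0000:03:00.0");
		printf("%s\n", name);
		return 0;
	}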
@@ -57,7 +57,8 @@ static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id)
 
 static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev)
 {
-	return (MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
+	return (MLX5_CAP_GEN(dev, steering_format_version) < MLX5_STEERING_FORMAT_CONNECTX_6DX ||
+		MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
 		MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table));
 }
 
@@ -52,7 +52,6 @@ struct dr_qp_init_attr {
 	u32 cqn;
 	u32 pdn;
 	u32 max_send_wr;
-	u32 max_send_sge;
 	struct mlx5_uars_page *uar;
 	u8 isolate_vl_tc:1;
 };
@@ -247,37 +246,6 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
 	return err == CQ_POLL_ERR ? err : npolled;
 }
 
-static int dr_qp_get_args_update_send_wqe_size(struct dr_qp_init_attr *attr)
-{
-	return roundup_pow_of_two(sizeof(struct mlx5_wqe_ctrl_seg) +
-				  sizeof(struct mlx5_wqe_flow_update_ctrl_seg) +
-				  sizeof(struct mlx5_wqe_header_modify_argument_update_seg));
-}
-
-/* We calculate for specific RC QP with the required functionality */
-static int dr_qp_calc_rc_send_wqe(struct dr_qp_init_attr *attr)
-{
-	int update_arg_size;
-	int inl_size = 0;
-	int tot_size;
-	int size;
-
-	update_arg_size = dr_qp_get_args_update_send_wqe_size(attr);
-
-	size = sizeof(struct mlx5_wqe_ctrl_seg) +
-	       sizeof(struct mlx5_wqe_raddr_seg);
-	inl_size = size + ALIGN(sizeof(struct mlx5_wqe_inline_seg) +
-				DR_STE_SIZE, 16);
-
-	size += attr->max_send_sge * sizeof(struct mlx5_wqe_data_seg);
-
-	size = max(size, update_arg_size);
-
-	tot_size = max(size, inl_size);
-
-	return ALIGN(tot_size, MLX5_SEND_WQE_BB);
-}
-
 static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
 					 struct dr_qp_init_attr *attr)
 {
@@ -285,7 +253,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
 	u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
 	struct mlx5_wq_param wqp;
 	struct mlx5dr_qp *dr_qp;
-	int wqe_size;
 	int inlen;
 	void *qpc;
 	void *in;
@@ -365,15 +332,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
 	if (err)
 		goto err_in;
 	dr_qp->uar = attr->uar;
-	wqe_size = dr_qp_calc_rc_send_wqe(attr);
-	dr_qp->max_inline_data = min(wqe_size -
-				     (sizeof(struct mlx5_wqe_ctrl_seg) +
-				      sizeof(struct mlx5_wqe_raddr_seg) +
-				      sizeof(struct mlx5_wqe_inline_seg)),
-				     (2 * MLX5_SEND_WQE_BB -
-				      (sizeof(struct mlx5_wqe_ctrl_seg) +
-				       sizeof(struct mlx5_wqe_raddr_seg) +
-				       sizeof(struct mlx5_wqe_inline_seg))));
 
 	return dr_qp;
 
@@ -437,48 +395,8 @@ dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
 		MLX5_SEND_WQE_DS;
 }
 
-static int dr_set_data_inl_seg(struct mlx5dr_qp *dr_qp,
-			       struct dr_data_seg *data_seg, void *wqe)
-{
-	int inline_header_size = sizeof(struct mlx5_wqe_ctrl_seg) +
-				 sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_inline_seg);
-	struct mlx5_wqe_inline_seg *seg;
-	int left_space;
-	int inl = 0;
-	void *addr;
-	int len;
-	int idx;
-
-	seg = wqe;
-	wqe += sizeof(*seg);
-	addr = (void *)(unsigned long)(data_seg->addr);
-	len = data_seg->length;
-	inl += len;
-	left_space = MLX5_SEND_WQE_BB - inline_header_size;
-
-	if (likely(len > left_space)) {
-		memcpy(wqe, addr, left_space);
-		len -= left_space;
-		addr += left_space;
-		idx = (dr_qp->sq.pc + 1) & (dr_qp->sq.wqe_cnt - 1);
-		wqe = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
-	}
-
-	memcpy(wqe, addr, len);
-
-	if (likely(inl)) {
-		seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
-		return DIV_ROUND_UP(inl + sizeof(seg->byte_count),
-				    MLX5_SEND_WQE_DS);
-	} else {
-		return 0;
-	}
-}
-
 static void
-dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
-				  struct mlx5_wqe_ctrl_seg *wq_ctrl,
+dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
 				  u64 remote_addr,
 				  u32 rkey,
 				  struct dr_data_seg *data_seg,
@@ -494,17 +412,15 @@ dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
 	wq_raddr->reserved = 0;
 
 	wq_dseg = (void *)(wq_raddr + 1);
-	/* WQE ctrl segment + WQE remote addr segment */
-	*size = (sizeof(*wq_ctrl) + sizeof(*wq_raddr)) / MLX5_SEND_WQE_DS;
 
-	if (data_seg->send_flags & IB_SEND_INLINE) {
-		*size += dr_set_data_inl_seg(dr_qp, data_seg, wq_dseg);
-	} else {
-		wq_dseg->byte_count = cpu_to_be32(data_seg->length);
-		wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
-		wq_dseg->addr = cpu_to_be64(data_seg->addr);
-		*size += sizeof(*wq_dseg) / MLX5_SEND_WQE_DS; /* WQE data segment */
-	}
+	wq_dseg->byte_count = cpu_to_be32(data_seg->length);
+	wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
+	wq_dseg->addr = cpu_to_be64(data_seg->addr);
+
+	*size = (sizeof(*wq_ctrl) +	/* WQE ctrl segment */
+		 sizeof(*wq_dseg) +	/* WQE data segment */
+		 sizeof(*wq_raddr)) /	/* WQE remote addr segment */
+		MLX5_SEND_WQE_DS;
 }
 
 static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
@@ -535,7 +451,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
 	switch (opcode) {
 	case MLX5_OPCODE_RDMA_READ:
 	case MLX5_OPCODE_RDMA_WRITE:
-		dr_rdma_handle_icm_write_segments(dr_qp, wq_ctrl, remote_addr,
+		dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
 						  rkey, data_seg, &size);
 		break;
 	case MLX5_OPCODE_FLOW_TBL_ACCESS:
@@ -656,7 +572,7 @@ static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
 	if (send_ring->pending_wqe % send_ring->signal_th == 0)
 		send_info->write.send_flags |= IB_SEND_SIGNALED;
 	else
-		send_info->write.send_flags &= ~IB_SEND_SIGNALED;
+		send_info->write.send_flags = 0;
 }
 
 static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
@@ -680,13 +596,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
 	}
 
 	send_ring->pending_wqe++;
-	if (!send_info->write.lkey)
-		send_info->write.send_flags |= IB_SEND_INLINE;
 
 	if (send_ring->pending_wqe % send_ring->signal_th == 0)
 		send_info->write.send_flags |= IB_SEND_SIGNALED;
-	else
-		send_info->write.send_flags &= ~IB_SEND_SIGNALED;
 
 	send_ring->pending_wqe++;
 	send_info->read.length = send_info->write.length;
@@ -696,9 +608,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
 	send_info->read.lkey = send_ring->sync_mr->mkey;
 
 	if (send_ring->pending_wqe % send_ring->signal_th == 0)
-		send_info->read.send_flags |= IB_SEND_SIGNALED;
+		send_info->read.send_flags = IB_SEND_SIGNALED;
 	else
-		send_info->read.send_flags &= ~IB_SEND_SIGNALED;
+		send_info->read.send_flags = 0;
 }
 
 static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
@@ -1345,7 +1257,6 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
 	dmn->send_ring->cq->qp = dmn->send_ring->qp;
 
 	dmn->info.max_send_wr = QUEUE_SIZE;
-	init_attr.max_send_sge = 1;
 	dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
 					DR_STE_SIZE);
 