vdpa/mlx5: Fix memory key MTT population

map_direct_mr() assumed that the number of scatter/gather entries
returned by dma_map_sg_attrs() was equal to the number of segments in
the sgl list, which led to incorrect population of the mkey object. Fix
this by using the count that dma_map_sg_attrs() actually returns.
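
To illustrate the API contract, here is a minimal sketch (the helper and
its names are hypothetical, not part of the driver): dma_map_sg_attrs()
returns the number of DMA segments actually mapped, which may be smaller
than the number of sgl entries handed to it, and 0 on failure, so both
counts have to be kept.

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Hypothetical helper, sketch only. */
	static int map_sgl(struct device *dma, struct scatterlist *sgl, int nsg)
	{
		int nent;

		/* nent may be smaller than nsg if entries were coalesced;
		 * 0 means the mapping failed.
		 */
		nent = dma_map_sg_attrs(dma, sgl, nsg, DMA_BIDIRECTIONAL, 0);
		if (!nent)
			return -ENOMEM;

		return nent;	/* later walks must iterate over nent, not nsg */
	}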

The hardware expects each MTT entry to contain the DMA address of a
contiguous block of memory of size (1 << mr->log_size) bytes.
dma_map_sg_attrs() can coalesce several sg entries into a single
scatter/gather entry covering a contiguous DMA range, so we need to scan
the mapped list and take the size of each scatter/gather entry into
account.
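
As a concrete example, a merged scatter/gather entry of 16 KiB with
mr->log_size = 12 must yield four MTT entries: addr, addr + 4K,
addr + 8K and addr + 12K. A minimal sketch of that expansion (sg, mtt,
j and log_size are assumed from the surrounding driver context; the
driver builds the sgl at (1 << log_size) granularity, so dma_len is a
multiple of the block size):

	/* Sketch only: expand one coalesced DMA segment into MTT entries
	 * of (1 << log_size) bytes each.
	 */
	u64 dma_addr = sg_dma_address(sg);
	u64 dma_len = sg_dma_len(sg);

	while (dma_len) {
		mtt[j++] = cpu_to_be64(dma_addr);
		dma_addr += BIT(log_size);
		dma_len -= BIT(log_size);
	}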

In addition, get rid of fill_sg(), whose effect is overwritten by
populate_mtts().

Fixes: 94abbccdf2 ("vdpa/mlx5: Add shared memory registration code")
Signed-off-by: Eli Cohen <elic@nvidia.com>
Link: https://lore.kernel.org/r/20210107071845.GA224876@mtl-vdi-166.wap.labs.mlnx
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
commit 710eb8e32d (parent 19c329f680)
Eli Cohen, 2021-01-07 09:18:45 +02:00; committed by Michael S. Tsirkin
2 changed files with 13 additions and 16 deletions

--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h

@@ -15,6 +15,7 @@ struct mlx5_vdpa_direct_mr {
 	struct sg_table sg_head;
 	int log_size;
 	int nsg;
+	int nent;
 	struct list_head list;
 	u64 offset;
 };

--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c

@@ -25,17 +25,6 @@ static int get_octo_len(u64 len, int page_shift)
 	return (npages + 1) / 2;
 }
 
-static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in)
-{
-	struct scatterlist *sg;
-	__be64 *pas;
-	int i;
-
-	pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
-	for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
-		(*pas) = cpu_to_be64(sg_dma_address(sg));
-}
-
 static void mlx5_set_access_mode(void *mkc, int mode)
 {
 	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
@@ -45,10 +34,18 @@ static void mlx5_set_access_mode(void *mkc, int mode)
 static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
 {
 	struct scatterlist *sg;
+	int nsg = mr->nsg;
+	u64 dma_addr;
+	u64 dma_len;
+	int j = 0;
 	int i;
 
-	for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
-		mtt[i] = cpu_to_be64(sg_dma_address(sg));
+	for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
+		for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
+		     nsg && dma_len;
+		     nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
+			mtt[j++] = cpu_to_be64(dma_addr);
+	}
 }
 
 static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
@@ -64,7 +61,6 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
 		return -ENOMEM;
 
 	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
-	fill_sg(mr, in);
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 	MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
 	MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
@@ -276,8 +272,8 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
 done:
 	mr->log_size = log_entity_size;
 	mr->nsg = nsg;
-	err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
-	if (!err)
+	mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+	if (!mr->nent)
 		goto err_map;
 
 	err = create_direct_mr(mvdev, mr);