RDMA/bnxt_re: Enable low latency push

Introduce driver-specific uapi functionalities. Add an alloc_page
functionality for the user library to allocate specific pages.
Currently, support is added for allocating write-combine pages for the
push functionality. This interface can be extended for other page
allocations.

Allocate a WC page using the uapi hook to enable low latency push in
Gen P5 adapters for small packets. This is supported only for
user-space QPs.
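
To sketch how user space would consume this (not part of the patch):
the library creates the page object through the new method and then
mmap()s the returned offset, which the driver maps write-combined. A
minimal illustration using libibverbs' ioctl command-buffer helpers
(cmd_ioctl.h, in the rdma-core tree); bnxt_re_alloc_wc_page() is a
hypothetical name, not the rdma-core provider code:

#include <stdint.h>
#include <sys/mman.h>
#include <infiniband/cmd_ioctl.h>	/* rdma-core in-tree helpers */
#include <rdma/bnxt_re-abi.h>

/* Hypothetical helper: allocate a WC doorbell page and map it. */
static void *bnxt_re_alloc_wc_page(struct ibv_context *ctx, uint32_t *dpi,
				   uint32_t *handle)
{
	DECLARE_COMMAND_BUFFER(cmd, BNXT_RE_OBJECT_ALLOC_PAGE,
			       BNXT_RE_METHOD_ALLOC_PAGE, 5);
	struct ib_uverbs_attr *attr;
	uint64_t mmap_offset;
	uint32_t mmap_length;
	void *page;

	attr = fill_attr_out_obj(cmd, BNXT_RE_ALLOC_PAGE_HANDLE);
	fill_attr_const_in(cmd, BNXT_RE_ALLOC_PAGE_TYPE,
			   BNXT_RE_ALLOC_WC_PAGE);
	fill_attr_out_ptr(cmd, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET, &mmap_offset);
	fill_attr_out_ptr(cmd, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH, &mmap_length);
	fill_attr_out_ptr(cmd, BNXT_RE_ALLOC_PAGE_DPI, dpi);

	if (execute_ioctl(ctx, cmd))
		return NULL;
	*handle = read_attr_obj(BNXT_RE_ALLOC_PAGE_HANDLE, attr);

	/* The kernel maps this offset write-combined (BNXT_RE_MMAP_WC_DB). */
	page = mmap(NULL, mmap_length, PROT_READ | PROT_WRITE, MAP_SHARED,
		    ctx->cmd_fd, mmap_offset);
	return page == MAP_FAILED ? NULL : page;
}

The returned DPI index would then be used when creating push-capable
QPs so that their doorbell writes target this WC page.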

Link: https://lore.kernel.org/r/1686679943-17117-8-git-send-email-selvin.xavier@broadcom.com
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Selvin Xavier 2023-06-13 11:12:23 -07:00 committed by Jason Gunthorpe
parent 0ac20faf5d
commit 360da60d6c
7 changed files with 204 additions and 4 deletions

drivers/infiniband/hw/bnxt_re/bnxt_re.h

@@ -39,6 +39,7 @@
#ifndef __BNXT_RE_H__
#define __BNXT_RE_H__
#include <rdma/uverbs_ioctl.h>
#include "hw_counters.h"
#define ROCE_DRV_MODULE_NAME "bnxt_re"
@@ -189,4 +190,6 @@ static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
return &rdev->ibdev.dev;
return NULL;
}
extern const struct uapi_definition bnxt_re_uapi_defs[];
#endif

drivers/infiniband/hw/bnxt_re/ib_verbs.c

@@ -61,6 +61,15 @@
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/ib_user_ioctl_cmds.h>
#define UVERBS_MODULE_NAME bnxt_re
#include <rdma/uverbs_named_ioctl.h>
#include <rdma/bnxt_re-abi.h>
static int __from_ib_access_flags(int iflags)
@@ -546,6 +555,7 @@ bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
entry->mem_offset = mem_offset;
entry->mmap_flag = mmap_flag;
entry->uctx = uctx;
switch (mmap_flag) {
case BNXT_RE_MMAP_SH_PAGE:
@@ -553,6 +563,7 @@ bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
&entry->rdma_entry, PAGE_SIZE, 0);
break;
case BNXT_RE_MMAP_UC_DB:
case BNXT_RE_MMAP_WC_DB:
ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
&entry->rdma_entry, PAGE_SIZE);
break;
@@ -4056,6 +4067,9 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
resp.mode = rdev->chip_ctx->modes.wqe_mode;
if (rdev->chip_ctx->modes.db_push)
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
if (!entry) {
rc = -ENOMEM;
@@ -4119,6 +4133,12 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
rdma_entry);
switch (bnxt_entry->mmap_flag) {
case BNXT_RE_MMAP_WC_DB:
pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
pgprot_writecombine(vma->vm_page_prot),
rdma_entry);
break;
case BNXT_RE_MMAP_UC_DB:
pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
@@ -4146,3 +4166,131 @@ void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
kfree(bnxt_entry);
}
static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
enum bnxt_re_alloc_page_type alloc_type;
struct bnxt_re_user_mmap_entry *entry;
enum bnxt_re_mmap_flag mmap_flag;
struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_re_ucontext *uctx;
struct bnxt_re_dev *rdev;
u64 mmap_offset;
u32 length;
u32 dpi;
u64 dbr;
int err;
uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
if (IS_ERR(uctx))
return PTR_ERR(uctx);
err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
if (err)
return err;
rdev = uctx->rdev;
cctx = rdev->chip_ctx;
switch (alloc_type) {
case BNXT_RE_ALLOC_WC_PAGE:
if (cctx->modes.db_push) {
if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
uctx, BNXT_QPLIB_DPI_TYPE_WC))
return -ENOMEM;
length = PAGE_SIZE;
dpi = uctx->wcdpi.dpi;
dbr = (u64)uctx->wcdpi.umdbr;
mmap_flag = BNXT_RE_MMAP_WC_DB;
} else {
return -EINVAL;
}
break;
default:
return -EOPNOTSUPP;
}
entry = bnxt_re_mmap_entry_insert(uctx, dbr, mmap_flag, &mmap_offset);
if (IS_ERR(entry))
return PTR_ERR(entry);
uobj->object = entry;
uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
&mmap_offset, sizeof(mmap_offset));
if (err)
return err;
err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
&length, sizeof(length));
if (err)
return err;
err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
&dpi, sizeof(dpi));
if (err)
return err;
return 0;
}
static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs)
{
struct bnxt_re_user_mmap_entry *entry = uobject->object;
struct bnxt_re_ucontext *uctx = entry->uctx;
switch (entry->mmap_flag) {
case BNXT_RE_MMAP_WC_DB:
if (uctx && uctx->wcdpi.dbr) {
struct bnxt_re_dev *rdev = uctx->rdev;
bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
uctx->wcdpi.dbr = NULL;
}
break;
default:
goto exit;
}
rdma_user_mmap_entry_remove(&entry->rdma_entry);
exit:
return 0;
}
DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
BNXT_RE_OBJECT_ALLOC_PAGE,
UVERBS_ACCESS_NEW,
UA_MANDATORY),
UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
enum bnxt_re_alloc_page_type,
UA_MANDATORY),
UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
UVERBS_ATTR_TYPE(u64),
UA_MANDATORY),
UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
UVERBS_ATTR_TYPE(u32),
UA_MANDATORY),
UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
UVERBS_ATTR_TYPE(u32),
UA_MANDATORY));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
BNXT_RE_OBJECT_ALLOC_PAGE,
UVERBS_ACCESS_DESTROY,
UA_MANDATORY));
DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
&UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
&UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
const struct uapi_definition bnxt_re_uapi_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
{}
};
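
Tearing the object down from user space goes through
BNXT_RE_METHOD_DESTROY_PAGE; the kernel side runs
alloc_page_obj_cleanup() above, which deallocates the WC DPI and
removes the mmap entry. A hypothetical counterpart to the allocation
sketch in the commit message (munmap() first, since the mapping goes
away with the entry):

/* Hypothetical teardown helper for the WC page object. */
static int bnxt_re_free_wc_page(struct ibv_context *ctx, void *page,
				size_t len, uint32_t handle)
{
	DECLARE_COMMAND_BUFFER(cmd, BNXT_RE_OBJECT_ALLOC_PAGE,
			       BNXT_RE_METHOD_DESTROY_PAGE, 1);

	munmap(page, len);	/* drop the WC mapping first */
	fill_attr_in_obj(cmd, BNXT_RE_DESTROY_PAGE_HANDLE, handle);
	return execute_ioctl(ctx, cmd);
}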

drivers/infiniband/hw/bnxt_re/ib_verbs.h

@@ -61,6 +61,7 @@ struct bnxt_re_pd {
struct bnxt_qplib_pd qplib_pd;
struct bnxt_re_fence_data fence;
struct rdma_user_mmap_entry *pd_db_mmap;
struct rdma_user_mmap_entry *pd_wcdb_mmap;
};
struct bnxt_re_ah {
@@ -135,6 +136,7 @@ struct bnxt_re_ucontext {
struct ib_ucontext ib_uctx;
struct bnxt_re_dev *rdev;
struct bnxt_qplib_dpi dpi;
struct bnxt_qplib_dpi wcdpi;
void *shpg;
spinlock_t sh_lock; /* protect shpg */
struct rdma_user_mmap_entry *shpage_mmap;
@@ -143,10 +145,12 @@ struct bnxt_re_ucontext {
enum bnxt_re_mmap_flag {
BNXT_RE_MMAP_SH_PAGE,
BNXT_RE_MMAP_UC_DB,
BNXT_RE_MMAP_WC_DB,
};
struct bnxt_re_user_mmap_entry {
struct rdma_user_mmap_entry rdma_entry;
struct bnxt_re_ucontext *uctx;
u64 mem_offset;
u8 mmap_flag;
};

drivers/infiniband/hw/bnxt_re/main.c

@@ -66,6 +66,7 @@
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"
#include "ib_verbs.h"
static char version[] =
BNXT_RE_DESC "\n";
@@ -117,6 +118,10 @@ static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
* in such cases and DB-push will be disabled.
*/
barlen = pci_resource_len(res->pdev, RCFW_DBR_PCI_BAR_REGION);
if (cctx->modes.db_push && l2db_len && en_dev->l2_db_size != barlen) {
res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
dev_info(rdev_to_dev(rdev), "Low latency framework is enabled\n");
}
}
static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
@@ -395,8 +400,7 @@ static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
int rc;
memset(&fw_msg, 0, sizeof(fw_msg));
bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
HWRM_FUNC_QCFG, -1, -1);
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG);
req.fid = cpu_to_le16(0xffff);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
@@ -416,13 +420,20 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
struct hwrm_func_qcaps_input req = {};
struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_fw_msg fw_msg = {};
int rc;
cctx = rdev->chip_ctx;
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCAPS);
req.fid = cpu_to_le16(0xffff);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
return bnxt_send_msg(en_dev, &fw_msg);
rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
return rc;
cctx->modes.db_push = le32_to_cpu(resp.flags) & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE;
return 0;
}
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
@@ -669,6 +680,9 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->dev.parent = &rdev->en_dev->pdev->dev;
ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;
if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
ibdev->driver_def = bnxt_re_uapi_defs;
ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
if (ret)

drivers/infiniband/hw/bnxt_re/qplib_res.c

@@ -740,6 +740,9 @@ int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
dpi->dbr = dpit->priv_db;
dpi->dpi = dpi->bit;
break;
case BNXT_QPLIB_DPI_TYPE_WC:
dpi->dbr = ioremap_wc(umaddr, PAGE_SIZE);
break;
default:
dpi->dbr = ioremap(umaddr, PAGE_SIZE);
break;
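
With ioremap_wc() in the kernel and pgprot_writecombine() on the user
mapping, low latency push boils down to writing the small WQE straight
into this page so payload and doorbell reach the adapter in one
write-combined burst. A rough x86-only illustration of the ordering
concern (push_wqe() is hypothetical; real providers use carefully
ordered MMIO copy helpers):

#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>	/* _mm_sfence() */

/* Hypothetical push: store the WQE into the mapped WC page in 8-byte
 * chunks, then fence so the write-combining buffers flush to the
 * device in order. */
static void push_wqe(volatile uint64_t *wc_page, const uint64_t *wqe,
		     size_t nqwords)
{
	size_t i;

	for (i = 0; i < nqwords; i++)
		wc_page[i] = wqe[i];
	_mm_sfence();
}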

drivers/infiniband/hw/bnxt_re/qplib_res.h

@@ -47,7 +47,7 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
struct bnxt_qplib_drv_modes {
u8 wqe_mode;
/* Other modes to follow here */
bool db_push;
};
struct bnxt_qplib_chip_ctx {
@@ -194,6 +194,7 @@ struct bnxt_qplib_sgid_tbl {
enum {
BNXT_QPLIB_DPI_TYPE_KERNEL = 0,
BNXT_QPLIB_DPI_TYPE_UC = 1,
BNXT_QPLIB_DPI_TYPE_WC = 2
};
struct bnxt_qplib_dpi {

include/uapi/rdma/bnxt_re-abi.h

@@ -41,6 +41,7 @@
#define __BNXT_RE_UVERBS_ABI_H__
#include <linux/types.h>
#include <rdma/ib_user_ioctl_cmds.h>
#define BNXT_RE_ABI_VERSION 1
@@ -51,6 +52,7 @@
enum {
BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL,
BNXT_RE_UCNTX_CMASK_HAVE_MODE = 0x02ULL,
BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED = 0x04ULL,
};
enum bnxt_re_wqe_mode {
@@ -127,4 +129,29 @@ enum bnxt_re_shpg_offt {
BNXT_RE_END_RESV_OFFT = 0xFF0
};
enum bnxt_re_objects {
BNXT_RE_OBJECT_ALLOC_PAGE = (1U << UVERBS_ID_NS_SHIFT),
};
enum bnxt_re_alloc_page_type {
BNXT_RE_ALLOC_WC_PAGE = 0,
};
enum bnxt_re_var_alloc_page_attrs {
BNXT_RE_ALLOC_PAGE_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
BNXT_RE_ALLOC_PAGE_TYPE,
BNXT_RE_ALLOC_PAGE_DPI,
BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
};
enum bnxt_re_alloc_page_attrs {
BNXT_RE_DESTROY_PAGE_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
};
enum bnxt_re_alloc_page_methods {
BNXT_RE_METHOD_ALLOC_PAGE = (1U << UVERBS_ID_NS_SHIFT),
BNXT_RE_METHOD_DESTROY_PAGE,
};
#endif /* __BNXT_RE_UVERBS_ABI_H__*/