nvme-rdma: limit the maximal queue size for RDMA controllers
Current limit of 1024 isn't valid for some of the RDMA based ctrls. In case the target exposes a cap with a larger number of entries (e.g. 1024), the initiator may fail to create a QP of this size. Thus limit to a value that works for all RDMA adapters. A future general solution should use the RDMA/core API to calculate this size according to device capabilities and the number of WRs needed per NVMe IO request.

Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit 44c3c6257e (parent 2351ead99c)
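The commit message points at a future, device-aware limit computed through the RDMA/core API instead of a fixed constant. A minimal sketch of what that could look like, assuming a hypothetical nvme_rdma_max_queue_size() helper and a made-up NVME_RDMA_WRS_PER_REQUEST factor (neither is part of this commit); only ib_device->attrs.max_qp_wr and min_t() are existing kernel interfaces:

#include <linux/minmax.h>
#include <rdma/ib_verbs.h>

/* Assumed number of work requests each NVMe I/O may post; illustration only. */
#define NVME_RDMA_WRS_PER_REQUEST	3

/* Hypothetical: derive the queue-size cap from what the RDMA device supports. */
static u32 nvme_rdma_max_queue_size(struct ib_device *ibdev)
{
	/* max_qp_wr is the largest number of WRs a single QP can hold */
	u32 max_requests = ibdev->attrs.max_qp_wr / NVME_RDMA_WRS_PER_REQUEST;

	/* never exceed the current transport-independent limit of 1024 entries */
	return min_t(u32, max_requests, 1024);
}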
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1112,6 +1112,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 			ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
 	}
 
+	if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
+		dev_warn(ctrl->ctrl.device,
+			"ctrl sqsize %u > max queue size %u, clamping down\n",
+			ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
+		ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
+	}
+
 	if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
 		dev_warn(ctrl->ctrl.device,
 			"sqsize %u > ctrl maxcmd %u, clamping down\n",
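sqsize is zero-based, so the +1 in the comparison converts it to the actual number of queue entries, and the clamp stores NVME_RDMA_MAX_QUEUE_SIZE - 1 (127) for a 128-entry queue. A standalone illustration of the same arithmetic, using a plain integer in place of the ctrl fields (userspace sketch, not kernel code):

#include <stdio.h>

#define NVME_RDMA_MAX_QUEUE_SIZE 128

int main(void)
{
	/* zero-based sqsize: a target advertising 1024 entries reports 1023 */
	unsigned int sqsize = 1023;

	if (sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
		printf("ctrl sqsize %u > max queue size %u, clamping down\n",
		       sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
		sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;	/* 127, i.e. 128 entries */
	}

	printf("effective queue depth: %u\n", sqsize + 1);
	return 0;
}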
--- a/include/linux/nvme-rdma.h
+++ b/include/linux/nvme-rdma.h
@@ -6,6 +6,8 @@
 #ifndef _LINUX_NVME_RDMA_H
 #define _LINUX_NVME_RDMA_H
 
+#define NVME_RDMA_MAX_QUEUE_SIZE	128
+
 enum nvme_rdma_cm_fmt {
 	NVME_RDMA_CM_FMT_1_0 = 0x0,
 };