RDMA/hns: Remove support for HIP06

HIP06 is no longer supported. Remove the HIP06 code to reduce unnecessary
maintenance.

Link: https://lore.kernel.org/r/20211220130558.61585-1-liangwenpeng@huawei.com
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Authored by Chengchang Tang on 2021-12-20 21:05:58 +08:00, committed by Jason Gunthorpe
parent 36783dec8d
commit 38d2208824
17 changed files with 33 additions and 6255 deletions

drivers/infiniband/hw/hns/Kconfig

@ -5,22 +5,9 @@ config INFINIBAND_HNS
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on (HNS_DSAF && HNS_ENET) || HNS3
help
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
is used in Hisilicon Hip06 and more further ICT SoC based on
platform device.
This is a RoCE/RDMA driver for the Hisilicon RoCE engine.
To compile HIP06 or HIP08 driver as module, choose M here.
config INFINIBAND_HNS_HIP06
bool "Hisilicon Hip06 Family RoCE support"
depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y)
help
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
Hip07 SoC. These RoCE engines are platform devices.
To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
module will be called hns-roce-hw-v1
To compile HIP08 driver as module, choose M here.
config INFINIBAND_HNS_HIP08
bool "Hisilicon Hip08 Family RoCE support"

drivers/infiniband/hw/hns/Makefile

@ -9,11 +9,6 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
ifdef CONFIG_INFINIBAND_HNS_HIP06
hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
endif
ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o

drivers/infiniband/hw/hns/hns_roce_ah.c

@ -30,7 +30,6 @@
* SOFTWARE.
*/
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
@ -61,7 +60,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct hns_roce_ah *ah = to_hr_ah(ibah);
int ret = 0;
if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && udata)
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
return -EOPNOTSUPP;
ah->av.port = rdma_ah_get_port_num(ah_attr);
@ -80,7 +79,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
/* HIP08 needs to record vlan info in Address Vector */
if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) {
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr,
&ah->av.vlan_id, NULL);
if (ret)
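
With the HIP06/HIP07 platform devices gone, HIP08 is the only remaining revision below HIP09, so the old "revision <= PCI_REVISION_ID_HIP08" tests can become equality checks without changing behaviour on the surviving hardware. A minimal sketch of how the affected lines of hns_roce_create_ah() read after this patch, reconstructed from the hunk above (everything else in the function is elided):

        /* Userspace AH creation is rejected on HIP08. */
        if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
                return -EOPNOTSUPP;

        /* ... */

        /* HIP08 needs to record vlan info in Address Vector */
        if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
                ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr,
                                              &ah->av.vlan_id, NULL);
                if (ret)
                        return ret;
                /* ... vlan handling elided ... */
        }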

drivers/infiniband/hw/hns/hns_roce_alloc.c

@ -31,10 +31,9 @@
* SOFTWARE.
*/
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include "hns_roce_device.h"
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{

drivers/infiniband/hw/hns/hns_roce_cmd.c

@ -31,7 +31,6 @@
*/
#include <linux/dmapool.h>
#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"

drivers/infiniband/hw/hns/hns_roce_common.h

@ -104,208 +104,6 @@
#define hr_reg_read(ptr, field) _hr_reg_read(ptr, field)
#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
#define ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S 5
#define ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S 6
#define ROCEE_GLB_CFG_ROCEE_PORT_ST_S 10
#define ROCEE_GLB_CFG_ROCEE_PORT_ST_M \
(((1UL << 6) - 1) << ROCEE_GLB_CFG_ROCEE_PORT_ST_S)
#define ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S 16
#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S 0
#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M \
(((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S)
#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S 24
#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M \
(((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S)
#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S 0
#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M \
(((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S)
#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S 24
#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M \
(((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S)
#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S 0
#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M \
(((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S)
#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S 16
#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M \
(((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S)
#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S 0
#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M \
(((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S)
#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S 16
#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M \
(((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S)
#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_S 0
#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_M \
(((1UL << 8) - 1) << ROCEE_RAQ_WL_ROCEE_RAQ_WL_S)
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S 0
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M \
(((1UL << 15) - 1) << \
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S)
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S 16
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M \
(((1UL << 4) - 1) << \
ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S)
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S 20
#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE 21
#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S 0
#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M \
(((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S)
#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S 5
#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M \
(((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S)
#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S 0
#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M \
(((1UL << 5) - 1) << ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S)
#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S 5
#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M \
(((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S)
#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S 0
#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M \
(((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S)
#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S 8
#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M \
(((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S)
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S 0
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M \
(((1UL << 19) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S)
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_S 19
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S 20
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M \
(((1UL << 2) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S)
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S 22
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M \
(((1UL << 5) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S)
#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S 31
#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S 0
#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M \
(((1UL << 3) - 1) << ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S)
#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S 0
#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M \
(((1UL << 15) - 1) << ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S)
#define ROCEE_MB6_ROCEE_MB_CMD_S 0
#define ROCEE_MB6_ROCEE_MB_CMD_M \
(((1UL << 8) - 1) << ROCEE_MB6_ROCEE_MB_CMD_S)
#define ROCEE_MB6_ROCEE_MB_CMD_MDF_S 8
#define ROCEE_MB6_ROCEE_MB_CMD_MDF_M \
(((1UL << 4) - 1) << ROCEE_MB6_ROCEE_MB_CMD_MDF_S)
#define ROCEE_MB6_ROCEE_MB_EVENT_S 14
#define ROCEE_MB6_ROCEE_MB_HW_RUN_S 15
#define ROCEE_MB6_ROCEE_MB_TOKEN_S 16
#define ROCEE_MB6_ROCEE_MB_TOKEN_M \
(((1UL << 16) - 1) << ROCEE_MB6_ROCEE_MB_TOKEN_S)
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S 0
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M \
(((1UL << 24) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S)
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S 24
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M \
(((1UL << 4) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S)
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S 28
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M \
(((1UL << 3) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S)
#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S 31
#define ROCEE_SMAC_H_ROCEE_SMAC_H_S 0
#define ROCEE_SMAC_H_ROCEE_SMAC_H_M \
(((1UL << 16) - 1) << ROCEE_SMAC_H_ROCEE_SMAC_H_S)
#define ROCEE_SMAC_H_ROCEE_PORT_MTU_S 16
#define ROCEE_SMAC_H_ROCEE_PORT_MTU_M \
(((1UL << 4) - 1) << ROCEE_SMAC_H_ROCEE_PORT_MTU_S)
#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S 0
#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M \
(((1UL << 2) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S)
#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S 8
#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M \
(((1UL << 4) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S)
#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S 17
#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S 0
#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M \
(((1UL << 5) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S)
#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S 16
#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M \
(((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S)
#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S 0
#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M \
(((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S)
#define ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S 16
#define ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S 1
#define ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S 0
#define ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S 0
#define ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S 1
#define ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S 0
#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S 0
#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M \
(((1UL << 28) - 1) << ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S 0
#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \
(((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \
(((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S 0
#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M \
(((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S)
#define ROCEE_SDB_CNT_CMP_BITS 16
#define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S 20
#define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0
/*************ROCEE_REG DEFINITION****************/
#define ROCEE_VENDOR_ID_REG 0x0
#define ROCEE_VENDOR_PART_ID_REG 0x4

drivers/infiniband/hw/hns/hns_roce_cq.c

@ -30,7 +30,6 @@
* SOFTWARE.
*/
#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
@ -406,15 +405,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
goto err_cqn;
}
/*
* For the QP created by kernel space, tptr value should be initialized
* to zero; For the QP created by user space, it will cause synchronous
* problems if tptr is set to zero here, so we initialize it in user
* space.
*/
if (!udata && hr_cq->tptr_addr)
*hr_cq->tptr_addr = 0;
if (udata) {
resp.cqn = hr_cq->cqn;
ret = ib_copy_to_udata(udata, &resp,
@ -441,9 +431,6 @@ int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
if (hr_dev->hw->destroy_cq)
hr_dev->hw->destroy_cq(ib_cq, udata);
free_cqc(hr_dev, hr_cq);
free_cqn(hr_dev, hr_cq->cqn);
free_cq_db(hr_dev, hr_cq, udata);
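
Only the removed HIP06 backend implemented the destroy_cq hook and maintained the tptr doorbell area, so both the indirect call and the kernel-side tptr initialisation can go, and the generic teardown now runs unconditionally. A sketch of hns_roce_destroy_cq() as it reads after this patch, based on the hunk above; the tail of the function is not visible in the hunk and is elided:

int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
        struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

        free_cqc(hr_dev, hr_cq);
        free_cqn(hr_dev, hr_cq->cqn);
        free_cq_db(hr_dev, hr_cq, udata);
        /* ... remaining cleanup and return as in the unmodified tail ... */
}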

drivers/infiniband/hw/hns/hns_roce_db.c

@ -4,7 +4,6 @@
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*/
#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"

drivers/infiniband/hw/hns/hns_roce_device.h

@ -36,36 +36,18 @@
#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>
#define DRV_NAME "hns_roce"
#define PCI_REVISION_ID_HIP08 0x21
#define PCI_REVISION_ID_HIP09 0x30
#define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6')
#define HNS_ROCE_MAX_MSG_LEN 0x80000000
#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
#define BA_BYTE_LEN 8
/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM 0x40
#define HNS_ROCE_MIN_WQE_NUM 0x20
#define HNS_ROCE_MIN_SRQ_WQE_NUM 1
/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000
#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20
#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \
(5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16
#define HNS_ROCE_RESERVED_SGE 1
#define HNS_ROCE_MAX_IRQ_NUM 128
#define HNS_ROCE_SGE_IN_WQE 2
@ -102,18 +84,12 @@
#define HNS_ROCE_FRMR_MAX_PA 512
#define PKEY_ID 0xffff
#define GUID_LEN 8
#define NODE_DESC_SIZE 64
#define DB_REG_OFFSET 0x1000
/* Configure to HW for PAGE_SIZE larger than 4KB */
#define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
#define PAGES_SHIFT_8 8
#define PAGES_SHIFT_16 16
#define PAGES_SHIFT_24 24
#define PAGES_SHIFT_32 32
#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
#define SRQ_DB_REG 0x230
@ -122,11 +98,6 @@
#define CQ_BANKID_SHIFT 2
/* The chip implementation of the consumer index is calculated
* according to twice the actual EQ depth
*/
#define EQ_DEPTH_COEFF 2
enum {
SERV_TYPE_RC,
SERV_TYPE_UC,
@ -228,7 +199,6 @@ struct hns_roce_uar {
enum hns_roce_mmap_type {
HNS_ROCE_MMAP_TYPE_DB = 1,
HNS_ROCE_MMAP_TYPE_TPTR,
HNS_ROCE_MMAP_TYPE_DWQE,
};
@ -244,7 +214,6 @@ struct hns_roce_ucontext {
struct list_head page_list;
struct mutex page_mutex;
struct hns_user_mmap_entry *db_mmap_entry;
struct hns_user_mmap_entry *tptr_mmap_entry;
};
struct hns_roce_pd {
@ -347,9 +316,6 @@ struct hns_roce_mw {
u32 pbl_buf_pg_sz;
};
/* Only support 4K page size for mr register */
#define MR_SIZE_4K 0
struct hns_roce_mr {
struct ib_mr ibmr;
u64 iova; /* MR's virtual original addr */
@ -455,7 +421,6 @@ struct hns_roce_cq {
u32 cons_index;
u32 *set_ci_db;
void __iomem *db_reg;
u16 *tptr_addr;
int arm_sn;
int cqe_size;
unsigned long cqn;
@ -541,10 +506,6 @@ struct hns_roce_srq_table {
struct hns_roce_hem_table table;
};
struct hns_roce_raq_table {
struct hns_roce_buf_list *e_raq_buf;
};
struct hns_roce_av {
u8 port;
u8 gid_index;
@ -648,9 +609,7 @@ struct hns_roce_qp {
u8 sl;
u8 resp_depth;
u8 state;
u32 access_flags;
u32 atomic_rd_en;
u32 pkey_index;
u32 qkey;
void (*event)(struct hns_roce_qp *qp,
enum hns_roce_event event_type);
@ -683,11 +642,6 @@ struct hns_roce_ib_iboe {
u8 phy_port[HNS_ROCE_MAX_PORTS];
};
enum {
HNS_ROCE_EQ_STAT_INVALID = 0,
HNS_ROCE_EQ_STAT_VALID = 2,
};
struct hns_roce_ceqe {
__le32 comp;
__le32 rsv[15];
@ -719,12 +673,9 @@ struct hns_roce_eq {
int type_flag; /* Aeq:1 ceq:0 */
int eqn;
u32 entries;
u32 log_entries;
int eqe_size;
int irq;
int log_page_size;
u32 cons_index;
struct hns_roce_buf_list *buf_list;
int over_ignore;
int coalesce;
int arm_st;
@ -739,7 +690,6 @@ struct hns_roce_eq {
struct hns_roce_eq_table {
struct hns_roce_eq *eq;
void __iomem **eqc_base; /* only for hw v1 */
};
enum cong_type {
@ -896,7 +846,6 @@ enum hns_roce_device_state {
};
struct hns_roce_hw {
int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
int (*cmq_init)(struct hns_roce_dev *hr_dev);
void (*cmq_exit)(struct hns_roce_dev *hr_dev);
int (*hw_profile)(struct hns_roce_dev *hr_dev);
@ -908,14 +857,12 @@ struct hns_roce_hw {
int (*poll_mbox_done)(struct hns_roce_dev *hr_dev,
unsigned int timeout);
bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index,
int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
const union ib_gid *gid, const struct ib_gid_attr *attr);
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
const u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu);
int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
struct hns_roce_mr *mr, unsigned long mtpt_idx);
struct hns_roce_mr *mr);
int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int flags,
void *mb_buf);
@ -935,9 +882,6 @@ struct hns_roce_hw {
enum ib_qp_state new_state);
int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp);
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
struct ib_udata *udata);
int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
@ -947,13 +891,11 @@ struct hns_roce_hw {
struct hns_roce_dev {
struct ib_device ib_dev;
struct platform_device *pdev;
struct pci_dev *pci_dev;
struct device *dev;
struct hns_roce_uar priv_uar;
const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
spinlock_t sm_lock;
spinlock_t bt_cmd_lock;
bool active;
bool is_reset;
bool dis_db;
@ -1000,8 +942,6 @@ struct hns_roce_dev {
int loop_idc;
u32 sdb_offset;
u32 odb_offset;
dma_addr_t tptr_dma_addr; /* only for hw v1 */
u32 tptr_size; /* only for hw v1 */
const struct hns_roce_hw *hw;
void *priv;
struct workqueue_struct *irq_workq;
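
Two hns_roce_hw callbacks change prototype here: set_gid loses the port argument and write_mtpt loses the mtpt index, while the reset, dereg_mr and destroy_cq hooks disappear entirely because only the HIP06 backend provided them. The matching hunks further down in hns_roce_hw_v2.c drop the same parameters without touching the function bodies, which suggests only the removed code consumed them. The two surviving prototypes as they read after this patch, copied from the hunk above with the other members omitted:

struct hns_roce_hw {
        /* ... other ops unchanged ... */
        int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
                       const union ib_gid *gid, const struct ib_gid_attr *attr);
        int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
                          struct hns_roce_mr *mr);
        /* ... remaining ops unchanged ... */
};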

drivers/infiniband/hw/hns/hns_roce_hem.c

@ -31,7 +31,6 @@
* SOFTWARE.
*/
#include <linux/platform_device.h>
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

drivers/infiniband/hw/hns/hns_roce_hw_v1.c (diff suppressed because it is too large)

drivers/infiniband/hw/hns/hns_roce_hw_v1.h (diff suppressed because it is too large)

drivers/infiniband/hw/hns/hns_roce_hw_v2.c

@ -1573,7 +1573,7 @@ static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
struct hns_roce_cmq_desc desc;
int ret;
if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) {
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
hr_dev->func_num = 1;
return 0;
}
@ -2390,7 +2390,7 @@ static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
struct hns_roce_caps *caps = &hr_dev->caps;
int ret;
if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
return 0;
ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
@ -2964,8 +2964,8 @@ static int config_gmv_table(struct hns_roce_dev *hr_dev,
return hns_roce_cmq_send(hr_dev, desc, 2);
}
static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
int gid_index, const union ib_gid *gid,
static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index,
const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
@ -3060,8 +3060,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
}
static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx)
void *mb_buf, struct hns_roce_mr *mr)
{
struct hns_roce_v2_mpt_entry *mpt_entry;
int ret;
@ -6348,7 +6347,7 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
if (!id)
return 0;
if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09)
if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08)
return 0;
ret = __hns_roce_hw_v2_init_instance(handle);
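
The checks that gate HIP09-only features move in the opposite direction: "revision < PCI_REVISION_ID_HIP09" becomes "revision == PCI_REVISION_ID_HIP08", which describes the same set of devices once HIP06/HIP07 are gone. A sketch of the early return in hns_roce_query_func_info() after this patch, taken from the hunk above (the command-queue query below it is unchanged):

        /* Multi-function query is a HIP09 feature; HIP08 keeps the default. */
        if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
                hr_dev->func_num = 1;
                return 0;
        }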

drivers/infiniband/hw/hns/hns_roce_main.c

@ -31,7 +31,6 @@
* SOFTWARE.
*/
#include <linux/acpi.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
@ -70,7 +69,7 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr);
return ret;
}
@ -84,7 +83,7 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, NULL, NULL);
ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL);
return ret;
}
@ -152,9 +151,6 @@ static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
u8 i;
for (i = 0; i < hr_dev->caps.num_ports; i++) {
if (hr_dev->hw->set_mtu)
hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
hr_dev->caps.max_mtu);
ret = hns_roce_set_mac(hr_dev, i,
hr_dev->iboe.netdevs[i]->dev_addr);
if (ret)
@ -311,17 +307,14 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
entry->mmap_type = mmap_type;
switch (mmap_type) {
/* pgoff 0 must be used by DB for compatibility */
case HNS_ROCE_MMAP_TYPE_DB:
ret = rdma_user_mmap_entry_insert_exact(
ucontext, &entry->rdma_entry, length, 0);
break;
case HNS_ROCE_MMAP_TYPE_TPTR:
ret = rdma_user_mmap_entry_insert_exact(
ucontext, &entry->rdma_entry, length, 1);
break;
case HNS_ROCE_MMAP_TYPE_DWQE:
ret = rdma_user_mmap_entry_insert_range(
ucontext, &entry->rdma_entry, length, 2,
ucontext, &entry->rdma_entry, length, 1,
U32_MAX);
break;
default:
@ -342,18 +335,12 @@ static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
if (context->db_mmap_entry)
rdma_user_mmap_entry_remove(
&context->db_mmap_entry->rdma_entry);
if (context->tptr_mmap_entry)
rdma_user_mmap_entry_remove(
&context->tptr_mmap_entry->rdma_entry);
}
static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
{
struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
u64 address;
int ret;
address = context->uar.pfn << PAGE_SHIFT;
context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
@ -361,27 +348,7 @@ static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
if (!context->db_mmap_entry)
return -ENOMEM;
if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
return 0;
/*
* FIXME: using io_remap_pfn_range on the dma address returned
* by dma_alloc_coherent is totally wrong.
*/
context->tptr_mmap_entry =
hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr,
hr_dev->tptr_size,
HNS_ROCE_MMAP_TYPE_TPTR);
if (!context->tptr_mmap_entry) {
ret = -ENOMEM;
goto err;
}
return 0;
err:
hns_roce_dealloc_uar_entry(context);
return ret;
}
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
@ -461,9 +428,6 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
case HNS_ROCE_MMAP_TYPE_DWQE:
prot = pgprot_device(vma->vm_page_prot);
break;
case HNS_ROCE_MMAP_TYPE_TPTR:
prot = vma->vm_page_prot;
break;
default:
return -EINVAL;
}
@ -843,7 +807,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
int ret;
spin_lock_init(&hr_dev->sm_lock);
spin_lock_init(&hr_dev->bt_cmd_lock);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
@ -934,20 +897,13 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
struct device *dev = hr_dev->dev;
int ret;
if (hr_dev->hw->reset) {
ret = hr_dev->hw->reset(hr_dev, true);
if (ret) {
dev_err(dev, "Reset RoCE engine failed!\n");
return ret;
}
}
hr_dev->is_reset = false;
if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev);
if (ret) {
dev_err(dev, "Init RoCE Command Queue failed!\n");
goto error_failed_cmq_init;
return ret;
}
}
@ -1030,12 +986,6 @@ error_failed_cmd_init:
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
error_failed_cmq_init:
if (hr_dev->hw->reset) {
if (hr_dev->hw->reset(hr_dev, false))
dev_err(dev, "Dereset RoCE engine failed!\n");
}
return ret;
}
@ -1055,8 +1005,6 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
hns_roce_cmd_cleanup(hr_dev);
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
if (hr_dev->hw->reset)
hr_dev->hw->reset(hr_dev, false);
}
MODULE_LICENSE("Dual BSD/GPL");
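
With the TPTR mmap entry gone, the userspace doorbell page keeps pgoff 0 (the comment added above spells out the compatibility requirement) and the direct-WQE entries can start at pgoff 1 instead of 2. A sketch of the switch in hns_roce_user_mmap_entry_insert() after this patch, reconstructed from the hunk above; the default arm and the surrounding allocation and error handling are elided:

        switch (mmap_type) {
        /* pgoff 0 must be used by DB for compatibility */
        case HNS_ROCE_MMAP_TYPE_DB:
                ret = rdma_user_mmap_entry_insert_exact(
                                ucontext, &entry->rdma_entry, length, 0);
                break;
        case HNS_ROCE_MMAP_TYPE_DWQE:
                ret = rdma_user_mmap_entry_insert_range(
                                ucontext, &entry->rdma_entry, length, 1,
                                U32_MAX);
                break;
        /* ... default arm elided ... */
        }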

drivers/infiniband/hw/hns/hns_roce_mr.c

@ -31,7 +31,6 @@
* SOFTWARE.
*/
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
@ -173,8 +172,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
}
if (mr->type != MR_TYPE_FRMR)
ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
mtpt_idx);
ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
else
ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
if (ret) {
@ -363,12 +361,8 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
struct hns_roce_mr *mr = to_hr_mr(ibmr);
int ret = 0;
if (hr_dev->hw->dereg_mr) {
ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
} else {
hns_roce_mr_free(hr_dev, mr);
kfree(mr);
}
hns_roce_mr_free(hr_dev, mr);
kfree(mr);
return ret;
}
@ -614,10 +608,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return -ENOBUFS;
for (i = 0; i < count && npage < max_count; i++) {
if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
addr = to_hr_hw_page_addr(pages[npage]);
else
addr = pages[npage];
addr = pages[npage];
mtts[i] = cpu_to_le64(addr);
npage++;
@ -847,10 +838,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
continue;
addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
mtt_buf[total] = to_hr_hw_page_addr(addr);
else
mtt_buf[total] = addr;
mtt_buf[total] = addr;
total++;
}
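
dereg_mr is another hook that only the HIP06 backend provided; it is deleted from struct hns_roce_hw above, so hns_roce_dereg_mr() shrinks to the common path. A sketch of the resulting function, based on the hunk above; the hr_dev lookup at the top is assumed from the unmodified context rather than visible in the hunk:

int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); /* assumed, not shown in the hunk */
        struct hns_roce_mr *mr = to_hr_mr(ibmr);
        int ret = 0;

        hns_roce_mr_free(hr_dev, mr);
        kfree(mr);

        return ret;
}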

drivers/infiniband/hw/hns/hns_roce_pd.c

@ -30,7 +30,6 @@
* SOFTWARE.
*/
#include <linux/platform_device.h>
#include <linux/pci.h>
#include "hns_roce_device.h"
@ -86,7 +85,6 @@ int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
struct resource *res;
int id;
/* Using bitmap to manager UAR index */
@ -104,21 +102,9 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
else
uar->index = 0;
if (!dev_is_pci(hr_dev->dev)) {
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
if (!res) {
ida_free(&uar_ida->ida, id);
dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
return -EINVAL;
}
uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
} else {
uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
>> PAGE_SHIFT);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
hr_dev->dwqe_page =
pci_resource_start(hr_dev->pci_dev, 4);
}
uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
hr_dev->dwqe_page = pci_resource_start(hr_dev->pci_dev, 4);
return 0;
}

drivers/infiniband/hw/hns/hns_roce_qp.c

@ -32,7 +32,6 @@
*/
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
@ -110,12 +109,11 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
return;
}
if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
(event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) {
qp->state = IB_QPS_ERR;
flush_cqe(hr_dev, qp);
@ -219,13 +217,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
int ret;
if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
/* when hw version is v1, the sqpn is allocated */
if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
num = HNS_ROCE_MAX_PORTS +
hr_dev->iboe.phy_port[hr_qp->port];
else
num = 1;
num = 1;
hr_qp->doorbell_qpn = 1;
} else {
mutex_lock(&qp_table->bank_mutex);
@ -324,11 +316,6 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
if (!hr_qp->qpn)
return -EINVAL;
/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
hr_dev->hw_rev == HNS_ROCE_HW_VER1)
return 0;
/* Alloc memory for QPC */
ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
if (ret) {
@ -407,11 +394,6 @@ static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
hr_dev->hw_rev == HNS_ROCE_HW_VER1)
return;
if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
@ -540,11 +522,6 @@ static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
hr_qp->sq.max_gs = HNS_ROCE_SGE_IN_WQE;
return;
}
hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp);
@ -1210,7 +1187,7 @@ static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
goto out;
break;
case IB_QPT_UD:
if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 &&
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
is_user)
goto out;
break;
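
The GSI QP number also stops depending on the physical port: the removed HIP06 path took the special QP number from a per-port range (HNS_ROCE_MAX_PORTS plus the physical port), while the remaining hardware always uses QP 1. A sketch of the GSI branch of alloc_qpn() after this patch, reconstructed from the hunk above; the bank-based path for regular QPs is untouched and elided:

        if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
                num = 1;
                hr_qp->doorbell_qpn = 1;
        } else {
                /* ... bank-based QPN allocation (with locking), unchanged by this patch ... */
        }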