Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-05 01:54:09 +08:00)
RDMA/iw_cxgb3: Remove the iw_cxgb3 module from kernel
Remove iw_cxgb3 module from kernel as the corresponding HW Chelsio T3 has reached EOL.

Link: https://lore.kernel.org/r/20190930074252.20133-1-bharat@chelsio.com
Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent f3fceba5da
commit 30e0f6cf5a
@@ -314,25 +314,6 @@ Description:
                board_id:       (RO) Manufacturing board ID


sysfs interface for Chelsio T3 RDMA Driver (cxgb3)
--------------------------------------------------

What:           /sys/class/infiniband/cxgb3_X/hw_rev
What:           /sys/class/infiniband/cxgb3_X/hca_type
What:           /sys/class/infiniband/cxgb3_X/board_id
Date:           Feb, 2007
KernelVersion:  v2.6.21
Contact:        linux-rdma@vger.kernel.org
Description:
                hw_rev:         (RO) Hardware revision number

                hca_type:       (RO) HCA type. Here it is a driver short name.
                                It should normally match the name in its bus
                                driver structure (e.g. pci_driver::name).

                board_id:       (RO) Manufacturing board id


sysfs interface for Mellanox ConnectX HCA IB driver (mlx4)
----------------------------------------------------------
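The removed entries describe three plain read-only sysfs attributes. A minimal user-space sketch of how they could be read on a system that still ships the driver (the device name cxgb3_0 is an assumption; the real name depends on the adapter index):

#include <stdio.h>

/* Read one sysfs attribute of the (now removed) cxgb3_X RDMA device.
 * The attribute names come from the Documentation/ABI entry above. */
static void show_attr(const char *attr)
{
        char path[256], buf[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/class/infiniband/cxgb3_0/%s", attr);
        f = fopen(path, "r");
        if (!f) {
                printf("%-10s <not present>\n", attr);
                return;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("%-10s %s", attr, buf);
        fclose(f);
}

int main(void)
{
        show_attr("hw_rev");    /* (RO) hardware revision number */
        show_attr("hca_type");  /* (RO) driver short name */
        show_attr("board_id");  /* (RO) manufacturing board id */
        return 0;
}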
@@ -4459,14 +4459,6 @@ W:      http://www.chelsio.com
S:      Supported
F:      drivers/scsi/cxgbi/cxgb3i

CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
M:      Potnuri Bharat Teja <bharat@chelsio.com>
L:      linux-rdma@vger.kernel.org
W:      http://www.openfabrics.org
S:      Supported
F:      drivers/infiniband/hw/cxgb3/
F:      include/uapi/rdma/cxgb3-abi.h

CXGB4 CRYPTO DRIVER (chcr)
M:      Atul Gupta <atul.gupta@chelsio.com>
L:      linux-crypto@vger.kernel.org
@@ -83,7 +83,6 @@ config INFINIBAND_ADDR_TRANS_CONFIGFS
if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/efa/Kconfig"
source "drivers/infiniband/hw/i40iw/Kconfig"
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_MTHCA)          += mthca/
obj-$(CONFIG_INFINIBAND_QIB)            += qib/
obj-$(CONFIG_INFINIBAND_CXGB3)          += cxgb3/
obj-$(CONFIG_INFINIBAND_CXGB4)          += cxgb4/
obj-$(CONFIG_INFINIBAND_EFA)            += efa/
obj-$(CONFIG_INFINIBAND_I40IW)          += i40iw/
@@ -1,19 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_CXGB3
        tristate "Chelsio RDMA Driver"
        depends on CHELSIO_T3
        select GENERIC_ALLOCATOR
        ---help---
          This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
          10GbE adapters.

          For general information about Chelsio and our products, visit
          our website at <http://www.chelsio.com>.

          For customer support, please visit our customer support page at
          <http://www.chelsio.com/support.html>.

          Please send feedback to <linux-bugs@chelsio.com>.

          To compile this driver as a module, choose M here: the module
          will be called iw_cxgb3.
@@ -1,7 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb3

obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o

iw_cxgb3-y :=   iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
                iwch_provider.o iwch.o cxio_hal.o cxio_resource.o
File diff suppressed because it is too large
@@ -1,204 +0,0 @@
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __CXIO_HAL_H__
#define __CXIO_HAL_H__

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/kfifo.h>

#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_ctl_defs.h"
#include "cxio_wr.h"

#define T3_CTRL_QP_ID    FW_RI_SGEEC_START
#define T3_CTL_QP_TID    FW_RI_TID_START
#define T3_CTRL_QP_SIZE_LOG2  8
#define T3_CTRL_CQ_ID    0

#define T3_MAX_NUM_RI (1<<15)
#define T3_MAX_NUM_QP (1<<15)
#define T3_MAX_NUM_CQ (1<<15)
#define T3_MAX_NUM_PD (1<<15)
#define T3_MAX_PBL_SIZE 256
#define T3_MAX_RQ_SIZE 1024
#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
#define T3_MAX_CQ_DEPTH 65536
#define T3_MAX_NUM_STAG (1<<15)
#define T3_MAX_MR_SIZE 0x100000000ULL
#define T3_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */

#define T3_STAG_UNSET 0xffffffff

#define T3_MAX_DEV_NAME_LEN 32

#define CXIO_FW_MAJ 7

struct cxio_hal_ctrl_qp {
        u32 wptr;
        u32 rptr;
        struct mutex lock;      /* for the wtpr, can sleep */
        wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
        union t3_wr *workq;     /* the work request queue */
        dma_addr_t dma_addr;    /* pci bus address of the workq */
        DEFINE_DMA_UNMAP_ADDR(mapping);
        void __iomem *doorbell;
};

struct cxio_hal_resource {
        struct kfifo tpt_fifo;
        spinlock_t tpt_fifo_lock;
        struct kfifo qpid_fifo;
        spinlock_t qpid_fifo_lock;
        struct kfifo cqid_fifo;
        spinlock_t cqid_fifo_lock;
        struct kfifo pdid_fifo;
        spinlock_t pdid_fifo_lock;
};

struct cxio_qpid_list {
        struct list_head entry;
        u32 qpid;
};

struct cxio_ucontext {
        struct list_head qpids;
        struct mutex lock;
};

struct cxio_rdev {
        char dev_name[T3_MAX_DEV_NAME_LEN];
        struct t3cdev *t3cdev_p;
        struct rdma_info rnic_info;
        struct adap_ports port_info;
        struct cxio_hal_resource *rscp;
        struct cxio_hal_ctrl_qp ctrl_qp;
        void *ulp;
        unsigned long qpshift;
        u32 qpnr;
        u32 qpmask;
        struct cxio_ucontext uctx;
        struct gen_pool *pbl_pool;
        struct gen_pool *rqt_pool;
        struct list_head entry;
        struct ch_embedded_info fw_info;
        u32 flags;
#define CXIO_ERROR_FATAL 1
};

static inline int cxio_fatal_error(struct cxio_rdev *rdev_p)
{
        return rdev_p->flags & CXIO_ERROR_FATAL;
}

static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
{
        return min((int)T3_MAX_NUM_STAG, (int)((rdev_p->rnic_info.tpt_top - rdev_p->rnic_info.tpt_base) >> 5));
}
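The >> 5 in cxio_num_stags() is a byte-to-entry conversion: each TPT entry is 32 bytes (eight 32-bit words; see struct tpt_entry further down in this commit), so the usable STag count is the hardware TPT window divided by 32, capped at T3_MAX_NUM_STAG. A standalone sketch of the same arithmetic, with invented window bounds:

#include <stdio.h>

int main(void)
{
        /* Hypothetical TPT window reported by the adapter (values invented). */
        unsigned int tpt_base = 0x100000;
        unsigned int tpt_top  = 0x180000;       /* 512 KiB window */
        unsigned int max_stag = 1 << 15;        /* T3_MAX_NUM_STAG */

        unsigned int nstags = (tpt_top - tpt_base) >> 5;       /* 32-byte entries */
        if (nstags > max_stag)
                nstags = max_stag;

        printf("%u STags\n", nstags);   /* 512 KiB / 32 B = 16384 */
        return 0;
}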

typedef void (*cxio_hal_ev_callback_func_t) (struct cxio_rdev * rdev_p,
                                             struct sk_buff * skb);

#define RSPQ_CQID(rsp) (be32_to_cpu(rsp->cq_ptrid) & 0xffff)
#define RSPQ_CQPTR(rsp) ((be32_to_cpu(rsp->cq_ptrid) >> 16) & 0xffff)
#define RSPQ_GENBIT(rsp) ((be32_to_cpu(rsp->flags) >> 16) & 1)
#define RSPQ_OVERFLOW(rsp) ((be32_to_cpu(rsp->flags) >> 17) & 1)
#define RSPQ_AN(rsp) ((be32_to_cpu(rsp->flags) >> 18) & 1)
#define RSPQ_SE(rsp) ((be32_to_cpu(rsp->flags) >> 19) & 1)
#define RSPQ_NOTIFY(rsp) ((be32_to_cpu(rsp->flags) >> 20) & 1)
#define RSPQ_CQBRANCH(rsp) ((be32_to_cpu(rsp->flags) >> 21) & 1)
#define RSPQ_CREDIT_THRESH(rsp) ((be32_to_cpu(rsp->flags) >> 22) & 1)
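The RSPQ_* helpers above each pull one status bit out of the big-endian flags word of a response-queue entry. A standalone user-space sketch of the same extraction, using ntohl() as a stand-in for the kernel's be32_to_cpu() and a made-up flags value:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>  /* ntohl()/htonl() stand in for be32_to_cpu() here */

int main(void)
{
        /* A made-up flags word as it would arrive from the adapter, i.e.
         * big-endian on the wire: GENBIT (bit 16) and SE (bit 19) set. */
        uint32_t wire_flags = htonl((1u << 16) | (1u << 19));
        uint32_t flags = ntohl(wire_flags);

        printf("GENBIT   = %u\n", (flags >> 16) & 1);   /* RSPQ_GENBIT   */
        printf("OVERFLOW = %u\n", (flags >> 17) & 1);   /* RSPQ_OVERFLOW */
        printf("AN       = %u\n", (flags >> 18) & 1);   /* RSPQ_AN       */
        printf("SE       = %u\n", (flags >> 19) & 1);   /* RSPQ_SE       */
        return 0;
}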

struct respQ_msg_t {
        __be32 flags;           /* flit 0 */
        __be32 cq_ptrid;
        __be64 rsvd;            /* flit 1 */
        struct t3_cqe cqe;      /* flits 2-3 */
};

enum t3_cq_opcode {
        CQ_ARM_AN = 0x2,
        CQ_ARM_SE = 0x6,
        CQ_FORCE_AN = 0x3,
        CQ_CREDIT_UPDATE = 0x7
};

int cxio_rdev_open(struct cxio_rdev *rdev);
void cxio_rdev_close(struct cxio_rdev *rdev);
int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
                   enum t3_cq_opcode op, u32 credit);
int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
void cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
void cxio_init_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
                   struct cxio_ucontext *uctx);
int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
                    struct cxio_ucontext *uctx);
int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
                   u32 pbl_addr, u32 pbl_size);
int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
                           enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
                           u8 page_size, u32 pbl_size, u32 pbl_addr);
int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
                           enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
                           u8 page_size, u32 pbl_size, u32 pbl_addr);
int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
                   u32 pbl_addr);
int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
int cxio_allocate_stag(struct cxio_rdev *rdev, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr);
int cxio_deallocate_window(struct cxio_rdev *rdev, u32 stag);
int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr);
void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
int __init cxio_hal_init(void);
void __exit cxio_hal_exit(void);
int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
void cxio_flush_hw_cq(struct t3_cq *cq);
int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
                 u8 *cqe_flushed, u64 *cookie, u32 *credit);
int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb);

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#endif
@ -1,344 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
/* Crude resource management */
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/kfifo.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/errno.h>
|
||||
#include "cxio_resource.h"
|
||||
#include "cxio_hal.h"
|
||||
|
||||
static struct kfifo rhdl_fifo;
|
||||
static spinlock_t rhdl_fifo_lock;
|
||||
|
||||
#define RANDOM_SIZE 16
|
||||
|
||||
static int __cxio_init_resource_fifo(struct kfifo *fifo,
|
||||
spinlock_t *fifo_lock,
|
||||
u32 nr, u32 skip_low,
|
||||
u32 skip_high,
|
||||
int random)
|
||||
{
|
||||
u32 i, j, entry = 0, idx;
|
||||
u32 random_bytes;
|
||||
u32 rarray[16];
|
||||
spin_lock_init(fifo_lock);
|
||||
|
||||
if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < skip_low + skip_high; i++)
|
||||
kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
|
||||
if (random) {
|
||||
j = 0;
|
||||
random_bytes = prandom_u32();
|
||||
for (i = 0; i < RANDOM_SIZE; i++)
|
||||
rarray[i] = i + skip_low;
|
||||
for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
|
||||
if (j >= RANDOM_SIZE) {
|
||||
j = 0;
|
||||
random_bytes = prandom_u32();
|
||||
}
|
||||
idx = (random_bytes >> (j * 2)) & 0xF;
|
||||
kfifo_in(fifo,
|
||||
(unsigned char *) &rarray[idx],
|
||||
sizeof(u32));
|
||||
rarray[idx] = i;
|
||||
j++;
|
||||
}
|
||||
for (i = 0; i < RANDOM_SIZE; i++)
|
||||
kfifo_in(fifo,
|
||||
(unsigned char *) &rarray[i],
|
||||
sizeof(u32));
|
||||
} else
|
||||
for (i = skip_low; i < nr - skip_high; i++)
|
||||
kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
|
||||
|
||||
for (i = 0; i < skip_low + skip_high; i++)
|
||||
if (kfifo_out_locked(fifo, (unsigned char *) &entry,
|
||||
sizeof(u32), fifo_lock) != sizeof(u32))
|
||||
break;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cxio_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
|
||||
u32 nr, u32 skip_low, u32 skip_high)
|
||||
{
|
||||
return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
|
||||
skip_high, 0));
|
||||
}
|
||||
|
||||
static int cxio_init_resource_fifo_random(struct kfifo *fifo,
|
||||
spinlock_t * fifo_lock,
|
||||
u32 nr, u32 skip_low, u32 skip_high)
|
||||
{
|
||||
|
||||
return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
|
||||
skip_high, 1));
|
||||
}
|
||||
|
||||
static int cxio_init_qpid_fifo(struct cxio_rdev *rdev_p)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
spin_lock_init(&rdev_p->rscp->qpid_fifo_lock);
|
||||
|
||||
if (kfifo_alloc(&rdev_p->rscp->qpid_fifo, T3_MAX_NUM_QP * sizeof(u32),
|
||||
GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 16; i < T3_MAX_NUM_QP; i++)
|
||||
if (!(i & rdev_p->qpmask))
|
||||
kfifo_in(&rdev_p->rscp->qpid_fifo,
|
||||
(unsigned char *) &i, sizeof(u32));
|
||||
return 0;
|
||||
}
|
||||
|
||||
int cxio_hal_init_rhdl_resource(u32 nr_rhdl)
|
||||
{
|
||||
return cxio_init_resource_fifo(&rhdl_fifo, &rhdl_fifo_lock, nr_rhdl, 1,
|
||||
0);
|
||||
}
|
||||
|
||||
void cxio_hal_destroy_rhdl_resource(void)
|
||||
{
|
||||
kfifo_free(&rhdl_fifo);
|
||||
}
|
||||
|
||||
/* nr_* must be power of 2 */
|
||||
int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
|
||||
u32 nr_tpt, u32 nr_pbl,
|
||||
u32 nr_rqt, u32 nr_qpid, u32 nr_cqid, u32 nr_pdid)
|
||||
{
|
||||
int err = 0;
|
||||
struct cxio_hal_resource *rscp;
|
||||
|
||||
rscp = kmalloc(sizeof(*rscp), GFP_KERNEL);
|
||||
if (!rscp)
|
||||
return -ENOMEM;
|
||||
rdev_p->rscp = rscp;
|
||||
err = cxio_init_resource_fifo_random(&rscp->tpt_fifo,
|
||||
&rscp->tpt_fifo_lock,
|
||||
nr_tpt, 1, 0);
|
||||
if (err)
|
||||
goto tpt_err;
|
||||
err = cxio_init_qpid_fifo(rdev_p);
|
||||
if (err)
|
||||
goto qpid_err;
|
||||
err = cxio_init_resource_fifo(&rscp->cqid_fifo, &rscp->cqid_fifo_lock,
|
||||
nr_cqid, 1, 0);
|
||||
if (err)
|
||||
goto cqid_err;
|
||||
err = cxio_init_resource_fifo(&rscp->pdid_fifo, &rscp->pdid_fifo_lock,
|
||||
nr_pdid, 1, 0);
|
||||
if (err)
|
||||
goto pdid_err;
|
||||
return 0;
|
||||
pdid_err:
|
||||
kfifo_free(&rscp->cqid_fifo);
|
||||
cqid_err:
|
||||
kfifo_free(&rscp->qpid_fifo);
|
||||
qpid_err:
|
||||
kfifo_free(&rscp->tpt_fifo);
|
||||
tpt_err:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
* returns 0 if no resource available
|
||||
*/
|
||||
static u32 cxio_hal_get_resource(struct kfifo *fifo, spinlock_t * lock)
|
||||
{
|
||||
u32 entry;
|
||||
if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
|
||||
return entry;
|
||||
else
|
||||
return 0; /* fifo emptry */
|
||||
}
|
||||
|
||||
static void cxio_hal_put_resource(struct kfifo *fifo, spinlock_t * lock,
|
||||
u32 entry)
|
||||
{
|
||||
BUG_ON(
|
||||
kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock)
|
||||
== 0);
|
||||
}
|
||||
|
||||
u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
|
||||
{
|
||||
return cxio_hal_get_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock);
|
||||
}
|
||||
|
||||
void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
|
||||
{
|
||||
cxio_hal_put_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock, stag);
|
||||
}
|
||||
|
||||
u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
|
||||
{
|
||||
u32 qpid = cxio_hal_get_resource(&rscp->qpid_fifo,
|
||||
&rscp->qpid_fifo_lock);
|
||||
pr_debug("%s qpid 0x%x\n", __func__, qpid);
|
||||
return qpid;
|
||||
}
|
||||
|
||||
void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
|
||||
{
|
||||
pr_debug("%s qpid 0x%x\n", __func__, qpid);
|
||||
cxio_hal_put_resource(&rscp->qpid_fifo, &rscp->qpid_fifo_lock, qpid);
|
||||
}
|
||||
|
||||
u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp)
|
||||
{
|
||||
return cxio_hal_get_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock);
|
||||
}
|
||||
|
||||
void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid)
|
||||
{
|
||||
cxio_hal_put_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock, cqid);
|
||||
}
|
||||
|
||||
u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp)
|
||||
{
|
||||
return cxio_hal_get_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock);
|
||||
}
|
||||
|
||||
void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid)
|
||||
{
|
||||
cxio_hal_put_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock, pdid);
|
||||
}
|
||||
|
||||
void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
|
||||
{
|
||||
kfifo_free(&rscp->tpt_fifo);
|
||||
kfifo_free(&rscp->cqid_fifo);
|
||||
kfifo_free(&rscp->qpid_fifo);
|
||||
kfifo_free(&rscp->pdid_fifo);
|
||||
kfree(rscp);
|
||||
}
|
||||
|
||||
/*
|
||||
* PBL Memory Manager. Uses Linux generic allocator.
|
||||
*/
|
||||
|
||||
#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
|
||||
|
||||
u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
|
||||
{
|
||||
unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
|
||||
pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
|
||||
return (u32)addr;
|
||||
}
|
||||
|
||||
void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
|
||||
{
|
||||
pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
|
||||
gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
|
||||
}
|
||||
|
||||
int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
|
||||
{
|
||||
unsigned pbl_start, pbl_chunk;
|
||||
|
||||
rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
|
||||
if (!rdev_p->pbl_pool)
|
||||
return -ENOMEM;
|
||||
|
||||
pbl_start = rdev_p->rnic_info.pbl_base;
|
||||
pbl_chunk = rdev_p->rnic_info.pbl_top - pbl_start + 1;
|
||||
|
||||
while (pbl_start < rdev_p->rnic_info.pbl_top) {
|
||||
pbl_chunk = min(rdev_p->rnic_info.pbl_top - pbl_start + 1,
|
||||
pbl_chunk);
|
||||
if (gen_pool_add(rdev_p->pbl_pool, pbl_start, pbl_chunk, -1)) {
|
||||
pr_debug("%s failed to add PBL chunk (%x/%x)\n",
|
||||
__func__, pbl_start, pbl_chunk);
|
||||
if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
|
||||
pr_warn("%s: Failed to add all PBL chunks (%x/%x)\n",
|
||||
__func__, pbl_start,
|
||||
rdev_p->rnic_info.pbl_top - pbl_start);
|
||||
return 0;
|
||||
}
|
||||
pbl_chunk >>= 1;
|
||||
} else {
|
||||
pr_debug("%s added PBL chunk (%x/%x)\n",
|
||||
__func__, pbl_start, pbl_chunk);
|
||||
pbl_start += pbl_chunk;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
|
||||
{
|
||||
gen_pool_destroy(rdev_p->pbl_pool);
|
||||
}
|
||||
|
||||
/*
|
||||
* RQT Memory Manager. Uses Linux generic allocator.
|
||||
*/
|
||||
|
||||
#define MIN_RQT_SHIFT 10 /* 1KB == mini RQT size (16 entries) */
|
||||
#define RQT_CHUNK 2*1024*1024
|
||||
|
||||
u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
|
||||
{
|
||||
unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
|
||||
pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
|
||||
return (u32)addr;
|
||||
}
|
||||
|
||||
void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
|
||||
{
|
||||
pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6);
|
||||
gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
|
||||
}
|
||||
|
||||
int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p)
|
||||
{
|
||||
unsigned long i;
|
||||
rdev_p->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
|
||||
if (rdev_p->rqt_pool)
|
||||
for (i = rdev_p->rnic_info.rqt_base;
|
||||
i <= rdev_p->rnic_info.rqt_top - RQT_CHUNK + 1;
|
||||
i += RQT_CHUNK)
|
||||
gen_pool_add(rdev_p->rqt_pool, i, RQT_CHUNK, -1);
|
||||
return rdev_p->rqt_pool ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p)
|
||||
{
|
||||
gen_pool_destroy(rdev_p->rqt_pool);
|
||||
}
|
@ -1,69 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#ifndef __CXIO_RESOURCE_H__
|
||||
#define __CXIO_RESOURCE_H__
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/kfifo.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/genalloc.h>
|
||||
#include "cxio_hal.h"
|
||||
|
||||
extern int cxio_hal_init_rhdl_resource(u32 nr_rhdl);
|
||||
extern void cxio_hal_destroy_rhdl_resource(void);
|
||||
extern int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
|
||||
u32 nr_tpt, u32 nr_pbl,
|
||||
u32 nr_rqt, u32 nr_qpid, u32 nr_cqid,
|
||||
u32 nr_pdid);
|
||||
extern u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp);
|
||||
extern void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag);
|
||||
extern u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp);
|
||||
extern void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid);
|
||||
extern u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp);
|
||||
extern void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid);
|
||||
extern void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp);
|
||||
|
||||
#define PBL_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.pbl_base )
|
||||
extern int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p);
|
||||
extern void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p);
|
||||
extern u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size);
|
||||
extern void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
|
||||
|
||||
#define RQT_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.rqt_base )
|
||||
extern int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p);
|
||||
extern void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p);
|
||||
extern u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size);
|
||||
extern void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
|
||||
#endif
|
@ -1,802 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#ifndef __CXIO_WR_H__
|
||||
#define __CXIO_WR_H__
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/timer.h>
|
||||
#include "firmware_exports.h"
|
||||
|
||||
#define T3_MAX_SGE 4
|
||||
#define T3_MAX_INLINE 64
|
||||
#define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
|
||||
#define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
|
||||
#define T3_STAG0_PAGE_SHIFT 15
|
||||
|
||||
#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
|
||||
#define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
|
||||
((rptr)!=(wptr)) )
|
||||
#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
|
||||
#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
|
||||
#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
|
||||
#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
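The Q_* macros above implement the driver's ring-buffer arithmetic: read and write pointers are free-running 32-bit counters, masked down to an array index only on access, with the generation bit taken from the bit just above the index so consumers can detect wrap-around. A minimal user-space sketch of that arithmetic (the 8-entry queue size is arbitrary, chosen only for illustration):

#include <stdio.h>
#include <stdint.h>

#define SIZE_LOG2 3                             /* 8-entry queue, demo only */
#define PTR2IDX(p)     ((p) & ((1UL << SIZE_LOG2) - 1))
#define GENBIT(p)      (!(((p) >> SIZE_LOG2) & 0x1))
#define FREECNT(r, w)  ((1UL << SIZE_LOG2) - ((w) - (r)))

int main(void)
{
        uint32_t rptr = 0, wptr = 0;

        /* Producer posts 10 entries, consumer retires 5: the raw counters
         * just keep growing, exactly like wq->wptr / cq->rptr above. */
        wptr += 10;
        rptr += 5;

        printf("count    = %u\n", wptr - rptr);           /* Q_COUNT   -> 5 */
        printf("free     = %lu\n", FREECNT(rptr, wptr));  /* Q_FREECNT -> 3 */
        printf("wr index = %lu\n", PTR2IDX(wptr));        /* Q_PTR2IDX -> 2 */
        printf("genbit   = %d\n", GENBIT(wptr));  /* alternates every 8 slots */
        return 0;
}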
|
||||
|
||||
static inline void ring_doorbell(void __iomem *doorbell, u32 qpid)
|
||||
{
|
||||
writel(((1<<31) | qpid), doorbell);
|
||||
}
|
||||
|
||||
#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
|
||||
|
||||
enum t3_wr_flags {
|
||||
T3_COMPLETION_FLAG = 0x01,
|
||||
T3_NOTIFY_FLAG = 0x02,
|
||||
T3_SOLICITED_EVENT_FLAG = 0x04,
|
||||
T3_READ_FENCE_FLAG = 0x08,
|
||||
T3_LOCAL_FENCE_FLAG = 0x10
|
||||
} __packed;
|
||||
|
||||
enum t3_wr_opcode {
|
||||
T3_WR_BP = FW_WROPCODE_RI_BYPASS,
|
||||
T3_WR_SEND = FW_WROPCODE_RI_SEND,
|
||||
T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
|
||||
T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
|
||||
T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
|
||||
T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
|
||||
T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
|
||||
T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
|
||||
T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP,
|
||||
T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR
|
||||
} __packed;
|
||||
|
||||
enum t3_rdma_opcode {
|
||||
T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */
|
||||
T3_READ_REQ,
|
||||
T3_READ_RESP,
|
||||
T3_SEND,
|
||||
T3_SEND_WITH_INV,
|
||||
T3_SEND_WITH_SE,
|
||||
T3_SEND_WITH_SE_INV,
|
||||
T3_TERMINATE,
|
||||
T3_RDMA_INIT, /* CHELSIO RI specific ... */
|
||||
T3_BIND_MW,
|
||||
T3_FAST_REGISTER,
|
||||
T3_LOCAL_INV,
|
||||
T3_QP_MOD,
|
||||
T3_BYPASS,
|
||||
T3_RDMA_READ_REQ_WITH_INV,
|
||||
} __packed;
|
||||
|
||||
static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
|
||||
{
|
||||
switch (wrop) {
|
||||
case T3_WR_BP: return T3_BYPASS;
|
||||
case T3_WR_SEND: return T3_SEND;
|
||||
case T3_WR_WRITE: return T3_RDMA_WRITE;
|
||||
case T3_WR_READ: return T3_READ_REQ;
|
||||
case T3_WR_INV_STAG: return T3_LOCAL_INV;
|
||||
case T3_WR_BIND: return T3_BIND_MW;
|
||||
case T3_WR_INIT: return T3_RDMA_INIT;
|
||||
case T3_WR_QP_MOD: return T3_QP_MOD;
|
||||
case T3_WR_FASTREG: return T3_FAST_REGISTER;
|
||||
default: break;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
/* Work request id */
|
||||
union t3_wrid {
|
||||
struct {
|
||||
u32 hi;
|
||||
u32 low;
|
||||
} id0;
|
||||
u64 id1;
|
||||
};
|
||||
|
||||
#define WRID(wrid) (wrid.id1)
|
||||
#define WRID_GEN(wrid) (wrid.id0.wr_gen)
|
||||
#define WRID_IDX(wrid) (wrid.id0.wr_idx)
|
||||
#define WRID_LO(wrid) (wrid.id0.wr_lo)
|
||||
|
||||
struct fw_riwrh {
|
||||
__be32 op_seop_flags;
|
||||
__be32 gen_tid_len;
|
||||
};
|
||||
|
||||
#define S_FW_RIWR_OP 24
|
||||
#define M_FW_RIWR_OP 0xff
|
||||
#define V_FW_RIWR_OP(x) ((x) << S_FW_RIWR_OP)
|
||||
#define G_FW_RIWR_OP(x) ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)
|
||||
|
||||
#define S_FW_RIWR_SOPEOP 22
|
||||
#define M_FW_RIWR_SOPEOP 0x3
|
||||
#define V_FW_RIWR_SOPEOP(x) ((x) << S_FW_RIWR_SOPEOP)
|
||||
|
||||
#define S_FW_RIWR_FLAGS 8
|
||||
#define M_FW_RIWR_FLAGS 0x3fffff
|
||||
#define V_FW_RIWR_FLAGS(x) ((x) << S_FW_RIWR_FLAGS)
|
||||
#define G_FW_RIWR_FLAGS(x) ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)
|
||||
|
||||
#define S_FW_RIWR_TID 8
|
||||
#define V_FW_RIWR_TID(x) ((x) << S_FW_RIWR_TID)
|
||||
|
||||
#define S_FW_RIWR_LEN 0
|
||||
#define V_FW_RIWR_LEN(x) ((x) << S_FW_RIWR_LEN)
|
||||
|
||||
#define S_FW_RIWR_GEN 31
|
||||
#define V_FW_RIWR_GEN(x) ((x) << S_FW_RIWR_GEN)
|
||||
|
||||
struct t3_sge {
|
||||
__be32 stag;
|
||||
__be32 len;
|
||||
__be64 to;
|
||||
};
|
||||
|
||||
/* If num_sgle is zero, flit 5+ contains immediate data.*/
|
||||
struct t3_send_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
|
||||
u8 rdmaop; /* 2 */
|
||||
u8 reserved[3];
|
||||
__be32 rem_stag;
|
||||
__be32 plen; /* 3 */
|
||||
__be32 num_sgle;
|
||||
struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */
|
||||
};
|
||||
|
||||
#define T3_MAX_FASTREG_DEPTH 10
|
||||
#define T3_MAX_FASTREG_FRAG 10
|
||||
|
||||
struct t3_fastreg_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
__be32 stag; /* 2 */
|
||||
__be32 len;
|
||||
__be32 va_base_hi; /* 3 */
|
||||
__be32 va_base_lo_fbo;
|
||||
__be32 page_type_perms; /* 4 */
|
||||
__be32 reserved1;
|
||||
__be64 pbl_addrs[0]; /* 5+ */
|
||||
};
|
||||
|
||||
/*
|
||||
* If a fastreg wr spans multiple wqes, then the 2nd fragment look like this.
|
||||
*/
|
||||
struct t3_pbl_frag {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
__be64 pbl_addrs[14]; /* 1..14 */
|
||||
};
|
||||
|
||||
#define S_FR_PAGE_COUNT 24
|
||||
#define M_FR_PAGE_COUNT 0xff
|
||||
#define V_FR_PAGE_COUNT(x) ((x) << S_FR_PAGE_COUNT)
|
||||
#define G_FR_PAGE_COUNT(x) ((((x) >> S_FR_PAGE_COUNT)) & M_FR_PAGE_COUNT)
|
||||
|
||||
#define S_FR_PAGE_SIZE 16
|
||||
#define M_FR_PAGE_SIZE 0x1f
|
||||
#define V_FR_PAGE_SIZE(x) ((x) << S_FR_PAGE_SIZE)
|
||||
#define G_FR_PAGE_SIZE(x) ((((x) >> S_FR_PAGE_SIZE)) & M_FR_PAGE_SIZE)
|
||||
|
||||
#define S_FR_TYPE 8
|
||||
#define M_FR_TYPE 0x1
|
||||
#define V_FR_TYPE(x) ((x) << S_FR_TYPE)
|
||||
#define G_FR_TYPE(x) ((((x) >> S_FR_TYPE)) & M_FR_TYPE)
|
||||
|
||||
#define S_FR_PERMS 0
|
||||
#define M_FR_PERMS 0xff
|
||||
#define V_FR_PERMS(x) ((x) << S_FR_PERMS)
|
||||
#define G_FR_PERMS(x) ((((x) >> S_FR_PERMS)) & M_FR_PERMS)
|
||||
|
||||
struct t3_local_inv_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
__be32 stag; /* 2 */
|
||||
__be32 reserved;
|
||||
};
|
||||
|
||||
struct t3_rdma_write_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
u8 rdmaop; /* 2 */
|
||||
u8 reserved[3];
|
||||
__be32 stag_sink;
|
||||
__be64 to_sink; /* 3 */
|
||||
__be32 plen; /* 4 */
|
||||
__be32 num_sgle;
|
||||
struct t3_sge sgl[T3_MAX_SGE]; /* 5+ */
|
||||
};
|
||||
|
||||
struct t3_rdma_read_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
u8 rdmaop; /* 2 */
|
||||
u8 local_inv;
|
||||
u8 reserved[2];
|
||||
__be32 rem_stag;
|
||||
__be64 rem_to; /* 3 */
|
||||
__be32 local_stag; /* 4 */
|
||||
__be32 local_len;
|
||||
__be64 local_to; /* 5 */
|
||||
};
|
||||
|
||||
struct t3_bind_mw_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
u16 reserved; /* 2 */
|
||||
u8 type;
|
||||
u8 perms;
|
||||
__be32 mr_stag;
|
||||
__be32 mw_stag; /* 3 */
|
||||
__be32 mw_len;
|
||||
__be64 mw_va; /* 4 */
|
||||
__be32 mr_pbl_addr; /* 5 */
|
||||
u8 reserved2[3];
|
||||
u8 mr_pagesz;
|
||||
};
|
||||
|
||||
struct t3_receive_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
u8 pagesz[T3_MAX_SGE];
|
||||
__be32 num_sgle; /* 2 */
|
||||
struct t3_sge sgl[T3_MAX_SGE]; /* 3+ */
|
||||
__be32 pbl_addr[T3_MAX_SGE];
|
||||
};
|
||||
|
||||
struct t3_bypass_wr {
|
||||
struct fw_riwrh wrh;
|
||||
union t3_wrid wrid; /* 1 */
|
||||
};
|
||||
|
||||
struct t3_modify_qp_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
__be32 flags; /* 2 */
|
||||
__be32 quiesce; /* 2 */
|
||||
__be32 max_ird; /* 3 */
|
||||
__be32 max_ord; /* 3 */
|
||||
__be64 sge_cmd; /* 4 */
|
||||
__be64 ctx1; /* 5 */
|
||||
__be64 ctx0; /* 6 */
|
||||
};
|
||||
|
||||
enum t3_modify_qp_flags {
|
||||
MODQP_QUIESCE = 0x01,
|
||||
MODQP_MAX_IRD = 0x02,
|
||||
MODQP_MAX_ORD = 0x04,
|
||||
MODQP_WRITE_EC = 0x08,
|
||||
MODQP_READ_EC = 0x10,
|
||||
};
|
||||
|
||||
|
||||
enum t3_mpa_attrs {
|
||||
uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
|
||||
uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
|
||||
uP_RI_MPA_CRC_ENABLE = 0x4,
|
||||
uP_RI_MPA_IETF_ENABLE = 0x8
|
||||
} __packed;
|
||||
|
||||
enum t3_qp_caps {
|
||||
uP_RI_QP_RDMA_READ_ENABLE = 0x01,
|
||||
uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
|
||||
uP_RI_QP_BIND_ENABLE = 0x04,
|
||||
uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
|
||||
uP_RI_QP_STAG0_ENABLE = 0x10
|
||||
} __packed;
|
||||
|
||||
enum rdma_init_rtr_types {
|
||||
RTR_READ = 1,
|
||||
RTR_WRITE = 2,
|
||||
RTR_SEND = 3,
|
||||
};
|
||||
|
||||
#define S_RTR_TYPE 2
|
||||
#define M_RTR_TYPE 0x3
|
||||
#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
|
||||
#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
|
||||
|
||||
#define S_CHAN 4
|
||||
#define M_CHAN 0x3
|
||||
#define V_CHAN(x) ((x) << S_CHAN)
|
||||
#define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
|
||||
|
||||
struct t3_rdma_init_attr {
|
||||
u32 tid;
|
||||
u32 qpid;
|
||||
u32 pdid;
|
||||
u32 scqid;
|
||||
u32 rcqid;
|
||||
u32 rq_addr;
|
||||
u32 rq_size;
|
||||
enum t3_mpa_attrs mpaattrs;
|
||||
enum t3_qp_caps qpcaps;
|
||||
u16 tcp_emss;
|
||||
u32 ord;
|
||||
u32 ird;
|
||||
u64 qp_dma_addr;
|
||||
u32 qp_dma_size;
|
||||
enum rdma_init_rtr_types rtr_type;
|
||||
u16 flags;
|
||||
u16 rqe_count;
|
||||
u32 irs;
|
||||
u32 chan;
|
||||
};
|
||||
|
||||
struct t3_rdma_init_wr {
|
||||
struct fw_riwrh wrh; /* 0 */
|
||||
union t3_wrid wrid; /* 1 */
|
||||
__be32 qpid; /* 2 */
|
||||
__be32 pdid;
|
||||
__be32 scqid; /* 3 */
|
||||
__be32 rcqid;
|
||||
__be32 rq_addr; /* 4 */
|
||||
__be32 rq_size;
|
||||
u8 mpaattrs; /* 5 */
|
||||
u8 qpcaps;
|
||||
__be16 ulpdu_size;
|
||||
__be16 flags_rtr_type;
|
||||
__be16 rqe_count;
|
||||
__be32 ord; /* 6 */
|
||||
__be32 ird;
|
||||
__be64 qp_dma_addr; /* 7 */
|
||||
__be32 qp_dma_size; /* 8 */
|
||||
__be32 irs;
|
||||
};
|
||||
|
||||
struct t3_genbit {
|
||||
u64 flit[15];
|
||||
__be64 genbit;
|
||||
};
|
||||
|
||||
struct t3_wq_in_err {
|
||||
u64 flit[13];
|
||||
u64 err;
|
||||
};
|
||||
|
||||
enum rdma_init_wr_flags {
|
||||
MPA_INITIATOR = (1<<0),
|
||||
PRIV_QP = (1<<1),
|
||||
};
|
||||
|
||||
union t3_wr {
|
||||
struct t3_send_wr send;
|
||||
struct t3_rdma_write_wr write;
|
||||
struct t3_rdma_read_wr read;
|
||||
struct t3_receive_wr recv;
|
||||
struct t3_fastreg_wr fastreg;
|
||||
struct t3_pbl_frag pbl_frag;
|
||||
struct t3_local_inv_wr local_inv;
|
||||
struct t3_bind_mw_wr bind;
|
||||
struct t3_bypass_wr bypass;
|
||||
struct t3_rdma_init_wr init;
|
||||
struct t3_modify_qp_wr qp_mod;
|
||||
struct t3_genbit genbit;
|
||||
struct t3_wq_in_err wq_in_err;
|
||||
__be64 flit[16];
|
||||
};
|
||||
|
||||
#define T3_SQ_CQE_FLIT 13
|
||||
#define T3_SQ_COOKIE_FLIT 14
|
||||
|
||||
#define T3_RQ_COOKIE_FLIT 13
|
||||
#define T3_RQ_CQE_FLIT 14
|
||||
|
||||
static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
|
||||
{
|
||||
return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
|
||||
}
|
||||
|
||||
enum t3_wr_hdr_bits {
|
||||
T3_EOP = 1,
|
||||
T3_SOP = 2,
|
||||
T3_SOPEOP = T3_EOP|T3_SOP,
|
||||
};
|
||||
|
||||
static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
|
||||
enum t3_wr_flags flags, u8 genbit, u32 tid,
|
||||
u8 len, u8 sopeop)
|
||||
{
|
||||
wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |
|
||||
V_FW_RIWR_SOPEOP(sopeop) |
|
||||
V_FW_RIWR_FLAGS(flags));
|
||||
wmb();
|
||||
wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
|
||||
V_FW_RIWR_TID(tid) |
|
||||
V_FW_RIWR_LEN(len));
|
||||
/* 2nd gen bit... */
|
||||
((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);
|
||||
}
|
||||
|
||||
/*
|
||||
* T3 ULP2_TX commands
|
||||
*/
|
||||
enum t3_utx_mem_op {
|
||||
T3_UTX_MEM_READ = 2,
|
||||
T3_UTX_MEM_WRITE = 3
|
||||
};
|
||||
|
||||
/* T3 MC7 RDMA TPT entry format */
|
||||
|
||||
enum tpt_mem_type {
|
||||
TPT_NON_SHARED_MR = 0x0,
|
||||
TPT_SHARED_MR = 0x1,
|
||||
TPT_MW = 0x2,
|
||||
TPT_MW_RELAXED_PROTECTION = 0x3
|
||||
};
|
||||
|
||||
enum tpt_addr_type {
|
||||
TPT_ZBTO = 0,
|
||||
TPT_VATO = 1
|
||||
};
|
||||
|
||||
enum tpt_mem_perm {
|
||||
TPT_MW_BIND = 0x10,
|
||||
TPT_LOCAL_READ = 0x8,
|
||||
TPT_LOCAL_WRITE = 0x4,
|
||||
TPT_REMOTE_READ = 0x2,
|
||||
TPT_REMOTE_WRITE = 0x1
|
||||
};
|
||||
|
||||
struct tpt_entry {
|
||||
__be32 valid_stag_pdid;
|
||||
__be32 flags_pagesize_qpid;
|
||||
|
||||
__be32 rsvd_pbl_addr;
|
||||
__be32 len;
|
||||
__be32 va_hi;
|
||||
__be32 va_low_or_fbo;
|
||||
|
||||
__be32 rsvd_bind_cnt_or_pstag;
|
||||
__be32 rsvd_pbl_size;
|
||||
};
|
||||
|
||||
#define S_TPT_VALID 31
|
||||
#define V_TPT_VALID(x) ((x) << S_TPT_VALID)
|
||||
#define F_TPT_VALID V_TPT_VALID(1U)
|
||||
|
||||
#define S_TPT_STAG_KEY 23
|
||||
#define M_TPT_STAG_KEY 0xFF
|
||||
#define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
|
||||
#define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
|
||||
|
||||
#define S_TPT_STAG_STATE 22
|
||||
#define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
|
||||
#define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)
|
||||
|
||||
#define S_TPT_STAG_TYPE 20
|
||||
#define M_TPT_STAG_TYPE 0x3
|
||||
#define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
|
||||
#define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
|
||||
|
||||
#define S_TPT_PDID 0
|
||||
#define M_TPT_PDID 0xFFFFF
|
||||
#define V_TPT_PDID(x) ((x) << S_TPT_PDID)
|
||||
#define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)
|
||||
|
||||
#define S_TPT_PERM 28
|
||||
#define M_TPT_PERM 0xF
|
||||
#define V_TPT_PERM(x) ((x) << S_TPT_PERM)
|
||||
#define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)
|
||||
|
||||
#define S_TPT_REM_INV_DIS 27
|
||||
#define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
|
||||
#define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)
|
||||
|
||||
#define S_TPT_ADDR_TYPE 26
|
||||
#define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
|
||||
#define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)
|
||||
|
||||
#define S_TPT_MW_BIND_ENABLE 25
|
||||
#define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
|
||||
#define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)
|
||||
|
||||
#define S_TPT_PAGE_SIZE 20
|
||||
#define M_TPT_PAGE_SIZE 0x1F
|
||||
#define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
|
||||
#define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
|
||||
|
||||
#define S_TPT_PBL_ADDR 0
|
||||
#define M_TPT_PBL_ADDR 0x1FFFFFFF
|
||||
#define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
|
||||
#define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
|
||||
|
||||
#define S_TPT_QPID 0
|
||||
#define M_TPT_QPID 0xFFFFF
|
||||
#define V_TPT_QPID(x) ((x) << S_TPT_QPID)
|
||||
#define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)
|
||||
|
||||
#define S_TPT_PSTAG 0
|
||||
#define M_TPT_PSTAG 0xFFFFFF
|
||||
#define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
|
||||
#define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
|
||||
|
||||
#define S_TPT_PBL_SIZE 0
|
||||
#define M_TPT_PBL_SIZE 0xFFFFF
|
||||
#define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
|
||||
#define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
|
||||
|
||||
/*
|
||||
* CQE defs
|
||||
*/
|
||||
struct t3_cqe {
|
||||
__be32 header;
|
||||
__be32 len;
|
||||
union {
|
||||
struct {
|
||||
__be32 stag;
|
||||
__be32 msn;
|
||||
} rcqe;
|
||||
struct {
|
||||
u32 wrid_hi;
|
||||
u32 wrid_low;
|
||||
} scqe;
|
||||
} u;
|
||||
};
|
||||
|
||||
#define S_CQE_OOO 31
|
||||
#define M_CQE_OOO 0x1
|
||||
#define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
|
||||
#define V_CEQ_OOO(x) ((x)<<S_CQE_OOO)
|
||||
|
||||
#define S_CQE_QPID 12
|
||||
#define M_CQE_QPID 0x7FFFF
|
||||
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
|
||||
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
|
||||
|
||||
#define S_CQE_SWCQE 11
|
||||
#define M_CQE_SWCQE 0x1
|
||||
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
|
||||
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
|
||||
|
||||
#define S_CQE_GENBIT 10
|
||||
#define M_CQE_GENBIT 0x1
|
||||
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
|
||||
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
|
||||
|
||||
#define S_CQE_STATUS 5
|
||||
#define M_CQE_STATUS 0x1F
|
||||
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
|
||||
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
|
||||
|
||||
#define S_CQE_TYPE 4
|
||||
#define M_CQE_TYPE 0x1
|
||||
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
|
||||
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
|
||||
|
||||
#define S_CQE_OPCODE 0
|
||||
#define M_CQE_OPCODE 0xF
|
||||
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
|
||||
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
|
||||
|
||||
#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x).header)))
|
||||
#define CQE_OOO(x) (G_CQE_OOO(be32_to_cpu((x).header)))
|
||||
#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x).header)))
|
||||
#define CQE_GENBIT(x) (G_CQE_GENBIT(be32_to_cpu((x).header)))
|
||||
#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x).header)))
|
||||
#define SQ_TYPE(x) (CQE_TYPE((x)))
|
||||
#define RQ_TYPE(x) (!CQE_TYPE((x)))
|
||||
#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x).header)))
|
||||
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x).header)))
|
||||
|
||||
#define CQE_SEND_OPCODE(x)( \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
|
||||
(G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
|
||||
|
||||
#define CQE_LEN(x) (be32_to_cpu((x).len))
|
||||
|
||||
/* used for RQ completion processing */
|
||||
#define CQE_WRID_STAG(x) (be32_to_cpu((x).u.rcqe.stag))
|
||||
#define CQE_WRID_MSN(x) (be32_to_cpu((x).u.rcqe.msn))
|
||||
|
||||
/* used for SQ completion processing */
|
||||
#define CQE_WRID_SQ_WPTR(x) ((x).u.scqe.wrid_hi)
|
||||
#define CQE_WRID_WPTR(x) ((x).u.scqe.wrid_low)
|
||||
|
||||
/* generic accessor macros */
|
||||
#define CQE_WRID_HI(x) ((x).u.scqe.wrid_hi)
|
||||
#define CQE_WRID_LOW(x) ((x).u.scqe.wrid_low)
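The CQE accessors above just apply the S_/M_/V_/G_CQE_* shift-and-mask quartet to the CQE header word. A standalone sketch of packing and unpacking such a header in host byte order (the QP id and opcode values are arbitrary; in the driver the word would first pass through be32_to_cpu()):

#include <stdio.h>
#include <stdint.h>

/* Same field layout as the S_/M_/V_/G_CQE_* macros above. */
#define V_QPID(x)    ((uint32_t)(x) << 12)
#define V_GENBIT(x)  ((uint32_t)(x) << 10)
#define V_TYPE(x)    ((uint32_t)(x) << 4)
#define V_OPCODE(x)  ((uint32_t)(x) << 0)

#define G_QPID(x)    (((x) >> 12) & 0x7FFFF)
#define G_GENBIT(x)  (((x) >> 10) & 0x1)
#define G_TYPE(x)    (((x) >> 4)  & 0x1)
#define G_OPCODE(x)  (((x) >> 0)  & 0xF)

int main(void)
{
        /* Arbitrary example: QP 37, SQ-type entry, opcode T3_SEND (0x3),
         * generation bit set. */
        uint32_t header = V_QPID(37) | V_GENBIT(1) | V_TYPE(1) | V_OPCODE(0x3);

        printf("qpid   = %u\n", G_QPID(header));        /* 37 */
        printf("genbit = %u\n", G_GENBIT(header));      /* 1  */
        printf("type   = %u (1 = SQ)\n", G_TYPE(header));
        printf("opcode = 0x%x\n", G_OPCODE(header));    /* 0x3 */
        return 0;
}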
|
||||
|
||||
#define TPT_ERR_SUCCESS 0x0
|
||||
#define TPT_ERR_STAG 0x1 /* STAG invalid: either the */
|
||||
/* STAG is offlimt, being 0, */
|
||||
/* or STAG_key mismatch */
|
||||
#define TPT_ERR_PDID 0x2 /* PDID mismatch */
|
||||
#define TPT_ERR_QPID 0x3 /* QPID mismatch */
|
||||
#define TPT_ERR_ACCESS 0x4 /* Invalid access right */
|
||||
#define TPT_ERR_WRAP 0x5 /* Wrap error */
|
||||
#define TPT_ERR_BOUND 0x6 /* base and bounds voilation */
|
||||
#define TPT_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
|
||||
/* shared memory region */
|
||||
#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
|
||||
/* shared memory region */
|
||||
#define TPT_ERR_ECC 0x9 /* ECC error detected */
|
||||
#define TPT_ERR_ECC_PSTAG 0xA /* ECC error detected when */
|
||||
/* reading PSTAG for a MW */
|
||||
/* Invalidate */
|
||||
#define TPT_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
|
||||
/* software error */
|
||||
#define TPT_ERR_SWFLUSH 0xC /* SW FLUSHED */
|
||||
#define TPT_ERR_CRC 0x10 /* CRC error */
|
||||
#define TPT_ERR_MARKER 0x11 /* Marker error */
|
||||
#define TPT_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
|
||||
#define TPT_ERR_OUT_OF_RQE 0x13 /* out of RQE */
|
||||
#define TPT_ERR_DDP_VERSION 0x14 /* wrong DDP version */
|
||||
#define TPT_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
|
||||
#define TPT_ERR_OPCODE 0x16 /* invalid rdma opcode */
|
||||
#define TPT_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
|
||||
#define TPT_ERR_MSN 0x18 /* MSN error */
|
||||
#define TPT_ERR_TBIT 0x19 /* tag bit not set correctly */
|
||||
#define TPT_ERR_MO 0x1A /* MO not 0 for TERMINATE */
|
||||
/* or READ_REQ */
|
||||
#define TPT_ERR_MSN_GAP 0x1B
|
||||
#define TPT_ERR_MSN_RANGE 0x1C
|
||||
#define TPT_ERR_IRD_OVERFLOW 0x1D
|
||||
#define TPT_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
|
||||
/* software error */
|
||||
#define TPT_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
|
||||
/* mismatch) */
|
||||
|
||||
struct t3_swsq {
|
||||
__u64 wr_id;
|
||||
struct t3_cqe cqe;
|
||||
__u32 sq_wptr;
|
||||
__be32 read_len;
|
||||
int opcode;
|
||||
int complete;
|
||||
int signaled;
|
||||
};
|
||||
|
||||
struct t3_swrq {
|
||||
__u64 wr_id;
|
||||
__u32 pbl_addr;
|
||||
};
|
||||
|
||||
/*
|
||||
* A T3 WQ implements both the SQ and RQ.
|
||||
*/
|
||||
struct t3_wq {
|
||||
union t3_wr *queue; /* DMA accessible memory */
|
||||
dma_addr_t dma_addr; /* DMA address for HW */
|
||||
DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */
|
||||
u32 error; /* 1 once we go to ERROR */
|
||||
u32 qpid;
|
||||
u32 wptr; /* idx to next available WR slot */
|
||||
u32 size_log2; /* total wq size */
|
||||
struct t3_swsq *sq; /* SW SQ */
|
||||
struct t3_swsq *oldest_read; /* tracks oldest pending read */
|
||||
u32 sq_wptr; /* sq_wptr - sq_rptr == count of */
|
||||
u32 sq_rptr; /* pending wrs */
|
||||
u32 sq_size_log2; /* sq size */
|
||||
struct t3_swrq *rq; /* SW RQ (holds consumer wr_ids */
|
||||
u32 rq_wptr; /* rq_wptr - rq_rptr == count of */
|
||||
u32 rq_rptr; /* pending wrs */
|
||||
struct t3_swrq *rq_oldest_wr; /* oldest wr on the SW RQ */
|
||||
u32 rq_size_log2; /* rq size */
|
||||
u32 rq_addr; /* rq adapter address */
|
||||
void __iomem *doorbell; /* kernel db */
|
||||
u64 udb; /* user db if any */
|
||||
struct cxio_rdev *rdev;
|
||||
};
|
||||
|
||||
struct t3_cq {
|
||||
u32 cqid;
|
||||
u32 rptr;
|
||||
u32 wptr;
|
||||
u32 size_log2;
|
||||
dma_addr_t dma_addr;
|
||||
DEFINE_DMA_UNMAP_ADDR(mapping);
|
||||
struct t3_cqe *queue;
|
||||
struct t3_cqe *sw_queue;
|
||||
u32 sw_rptr;
|
||||
u32 sw_wptr;
|
||||
};
|
||||
|
||||
#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
|
||||
CQE_GENBIT(*cqe))
|
||||
|
||||
struct t3_cq_status_page {
|
||||
u32 cq_err;
|
||||
};
|
||||
|
||||
static inline int cxio_cq_in_error(struct t3_cq *cq)
|
||||
{
|
||||
return ((struct t3_cq_status_page *)
|
||||
&cq->queue[1 << cq->size_log2])->cq_err;
|
||||
}
|
||||
|
||||
static inline void cxio_set_cq_in_error(struct t3_cq *cq)
|
||||
{
|
||||
((struct t3_cq_status_page *)
|
||||
&cq->queue[1 << cq->size_log2])->cq_err = 1;
|
||||
}
|
||||
|
||||
static inline void cxio_set_wq_in_error(struct t3_wq *wq)
|
||||
{
|
||||
wq->queue->wq_in_err.err |= 1;
|
||||
}
|
||||
|
||||
static inline void cxio_disable_wq_db(struct t3_wq *wq)
|
||||
{
|
||||
wq->queue->wq_in_err.err |= 2;
|
||||
}
|
||||
|
||||
static inline void cxio_enable_wq_db(struct t3_wq *wq)
|
||||
{
|
||||
wq->queue->wq_in_err.err &= ~2;
|
||||
}
|
||||
|
||||
static inline int cxio_wq_db_enabled(struct t3_wq *wq)
|
||||
{
|
||||
return !(wq->queue->wq_in_err.err & 2);
|
||||
}
|
||||
|
||||
static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
|
||||
{
|
||||
struct t3_cqe *cqe;
|
||||
|
||||
cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
|
||||
if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
|
||||
return cqe;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
|
||||
{
|
||||
struct t3_cqe *cqe;
|
||||
|
||||
if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
|
||||
cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
|
||||
return cqe;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
|
||||
{
|
||||
struct t3_cqe *cqe;
|
||||
|
||||
if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
|
||||
cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
|
||||
return cqe;
|
||||
}
|
||||
cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
|
||||
if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
|
||||
return cqe;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif
|
@ -1,282 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleparam.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
|
||||
#include "cxgb3_offload.h"
|
||||
#include "iwch_provider.h"
|
||||
#include <rdma/cxgb3-abi.h>
|
||||
#include "iwch.h"
|
||||
#include "iwch_cm.h"
|
||||
|
||||
#define DRV_VERSION "1.1"
|
||||
|
||||
MODULE_AUTHOR("Boyd Faulkner, Steve Wise");
|
||||
MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
|
||||
static void open_rnic_dev(struct t3cdev *);
|
||||
static void close_rnic_dev(struct t3cdev *);
|
||||
static void iwch_event_handler(struct t3cdev *, u32, u32);
|
||||
|
||||
struct cxgb3_client t3c_client = {
|
||||
.name = "iw_cxgb3",
|
||||
.add = open_rnic_dev,
|
||||
.remove = close_rnic_dev,
|
||||
.handlers = t3c_handlers,
|
||||
.redirect = iwch_ep_redirect,
|
||||
.event_handler = iwch_event_handler
|
||||
};
|
||||
|
||||
static LIST_HEAD(dev_list);
|
||||
static DEFINE_MUTEX(dev_mutex);
|
||||
|
||||
static void disable_dbs(struct iwch_dev *rnicp)
|
||||
{
|
||||
unsigned long index;
|
||||
struct iwch_qp *qhp;
|
||||
|
||||
xa_lock_irq(&rnicp->qps);
|
||||
xa_for_each(&rnicp->qps, index, qhp)
|
||||
cxio_disable_wq_db(&qhp->wq);
|
||||
xa_unlock_irq(&rnicp->qps);
|
||||
}
|
||||
|
||||
static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
|
||||
{
|
||||
unsigned long index;
|
||||
struct iwch_qp *qhp;
|
||||
|
||||
xa_lock_irq(&rnicp->qps);
|
||||
xa_for_each(&rnicp->qps, index, qhp) {
|
||||
if (ring_db)
|
||||
ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell,
|
||||
qhp->wq.qpid);
|
||||
cxio_enable_wq_db(&qhp->wq);
|
||||
}
|
||||
xa_unlock_irq(&rnicp->qps);
|
||||
}
|
||||
|
||||
static void iwch_db_drop_task(struct work_struct *work)
|
||||
{
|
||||
struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
|
||||
db_drop_task.work);
|
||||
enable_dbs(rnicp, 1);
|
||||
}
|
||||
|
||||
static void rnic_init(struct iwch_dev *rnicp)
|
||||
{
|
||||
pr_debug("%s iwch_dev %p\n", __func__, rnicp);
|
||||
xa_init_flags(&rnicp->cqs, XA_FLAGS_LOCK_IRQ);
|
||||
xa_init_flags(&rnicp->qps, XA_FLAGS_LOCK_IRQ);
|
||||
xa_init_flags(&rnicp->mrs, XA_FLAGS_LOCK_IRQ);
|
||||
INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
|
||||
|
||||
rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
|
||||
rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
|
||||
rnicp->attr.max_sge_per_wr = T3_MAX_SGE;
|
||||
rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE;
|
||||
rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1;
|
||||
rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH;
|
||||
rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev);
|
||||
rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
|
||||
rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
|
||||
rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK;
|
||||
rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
|
||||
rnicp->attr.can_resize_wq = 0;
|
||||
rnicp->attr.max_rdma_reads_per_qp = 8;
|
||||
rnicp->attr.max_rdma_read_resources =
|
||||
rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps;
|
||||
rnicp->attr.max_rdma_read_qp_depth = 8; /* IRD */
|
||||
rnicp->attr.max_rdma_read_depth =
|
||||
rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps;
|
||||
rnicp->attr.rq_overflow_handled = 0;
|
||||
rnicp->attr.can_modify_ird = 0;
|
||||
rnicp->attr.can_modify_ord = 0;
|
||||
rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1;
|
||||
rnicp->attr.stag0_value = 1;
|
||||
rnicp->attr.zbva_support = 1;
|
||||
rnicp->attr.local_invalidate_fence = 1;
|
||||
rnicp->attr.cq_overflow_detection = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
static void open_rnic_dev(struct t3cdev *tdev)
|
||||
{
|
||||
struct iwch_dev *rnicp;
|
||||
|
||||
pr_debug("%s t3cdev %p\n", __func__, tdev);
|
||||
pr_info_once("Chelsio T3 RDMA Driver - version %s\n", DRV_VERSION);
|
||||
rnicp = ib_alloc_device(iwch_dev, ibdev);
|
||||
if (!rnicp) {
|
||||
pr_err("Cannot allocate ib device\n");
|
||||
return;
|
||||
}
|
||||
rnicp->rdev.ulp = rnicp;
|
||||
rnicp->rdev.t3cdev_p = tdev;
|
||||
|
||||
mutex_lock(&dev_mutex);
|
||||
|
||||
if (cxio_rdev_open(&rnicp->rdev)) {
|
||||
mutex_unlock(&dev_mutex);
|
||||
pr_err("Unable to open CXIO rdev\n");
|
||||
ib_dealloc_device(&rnicp->ibdev);
|
||||
return;
|
||||
}
|
||||
|
||||
rnic_init(rnicp);
|
||||
|
||||
list_add_tail(&rnicp->entry, &dev_list);
|
||||
mutex_unlock(&dev_mutex);
|
||||
|
||||
if (iwch_register_device(rnicp)) {
|
||||
pr_err("Unable to register device\n");
|
||||
close_rnic_dev(tdev);
|
||||
}
|
||||
pr_info("Initialized device %s\n",
|
||||
pci_name(rnicp->rdev.rnic_info.pdev));
|
||||
return;
|
||||
}
|
||||
|
||||
static void close_rnic_dev(struct t3cdev *tdev)
|
||||
{
|
||||
struct iwch_dev *dev, *tmp;
|
||||
pr_debug("%s t3cdev %p\n", __func__, tdev);
|
||||
mutex_lock(&dev_mutex);
|
||||
list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
|
||||
if (dev->rdev.t3cdev_p == tdev) {
|
||||
dev->rdev.flags = CXIO_ERROR_FATAL;
|
||||
synchronize_net();
|
||||
cancel_delayed_work_sync(&dev->db_drop_task);
|
||||
list_del(&dev->entry);
|
||||
iwch_unregister_device(dev);
|
||||
cxio_rdev_close(&dev->rdev);
|
||||
WARN_ON(!xa_empty(&dev->cqs));
|
||||
WARN_ON(!xa_empty(&dev->qps));
|
||||
WARN_ON(!xa_empty(&dev->mrs));
|
||||
ib_dealloc_device(&dev->ibdev);
|
||||
break;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&dev_mutex);
|
||||
}
|
||||
|
||||
static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
|
||||
{
|
||||
struct cxio_rdev *rdev = tdev->ulp;
|
||||
struct iwch_dev *rnicp;
|
||||
struct ib_event event;
|
||||
u32 portnum = port_id + 1;
|
||||
int dispatch = 0;
|
||||
|
||||
if (!rdev)
|
||||
return;
|
||||
rnicp = rdev_to_iwch_dev(rdev);
|
||||
switch (evt) {
|
||||
case OFFLOAD_STATUS_DOWN: {
|
||||
rdev->flags = CXIO_ERROR_FATAL;
|
||||
synchronize_net();
|
||||
event.event = IB_EVENT_DEVICE_FATAL;
|
||||
dispatch = 1;
|
||||
break;
|
||||
}
|
||||
case OFFLOAD_PORT_DOWN: {
|
||||
event.event = IB_EVENT_PORT_ERR;
|
||||
dispatch = 1;
|
||||
break;
|
||||
}
|
||||
case OFFLOAD_PORT_UP: {
|
||||
event.event = IB_EVENT_PORT_ACTIVE;
|
||||
dispatch = 1;
|
||||
break;
|
||||
}
|
||||
case OFFLOAD_DB_FULL: {
|
||||
disable_dbs(rnicp);
|
||||
break;
|
||||
}
|
||||
case OFFLOAD_DB_EMPTY: {
|
||||
enable_dbs(rnicp, 1);
|
||||
break;
|
||||
}
|
||||
case OFFLOAD_DB_DROP: {
|
||||
unsigned long delay = 1000;
|
||||
unsigned short r;
|
||||
|
||||
disable_dbs(rnicp);
|
||||
get_random_bytes(&r, 2);
|
||||
delay += r & 1023;
|
||||
|
||||
/*
|
||||
* delay is between 1000-2023 usecs.
|
||||
*/
|
||||
schedule_delayed_work(&rnicp->db_drop_task,
|
||||
usecs_to_jiffies(delay));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (dispatch) {
|
||||
event.device = &rnicp->ibdev;
|
||||
event.element.port_num = portnum;
|
||||
ib_dispatch_event(&event);
|
||||
}
|
||||
|
||||
return;
|
||||
}
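/*
 * Editor's illustrative sketch, not part of the original driver: the
 * OFFLOAD_DB_DROP case above re-enables the doorbells only after a
 * randomized delay, presumably so that all queue pairs do not re-ring
 * their doorbells at the same instant.  With r holding two random bytes,
 * the delay works out to 1000 + (r & 1023) microseconds, i.e. spread
 * uniformly over 1000..2023 usecs:
 */
static inline unsigned long example_db_drop_delay_us(unsigned short r)
{
	return 1000 + (r & 1023);	/* matches the computation above */
}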
|
||||
|
||||
static int __init iwch_init_module(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = cxio_hal_init();
|
||||
if (err)
|
||||
return err;
|
||||
err = iwch_cm_init();
|
||||
if (err)
|
||||
return err;
|
||||
cxio_register_ev_cb(iwch_ev_dispatch);
|
||||
cxgb3_register_client(&t3c_client);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit iwch_exit_module(void)
|
||||
{
|
||||
cxgb3_unregister_client(&t3c_client);
|
||||
cxio_unregister_ev_cb(iwch_ev_dispatch);
|
||||
iwch_cm_term();
|
||||
cxio_hal_exit();
|
||||
}
|
||||
|
||||
module_init(iwch_init_module);
|
||||
module_exit(iwch_exit_module);
|
@ -1,155 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#ifndef __IWCH_H__
|
||||
#define __IWCH_H__
|
||||
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/xarray.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
|
||||
#include "cxio_hal.h"
|
||||
#include "cxgb3_offload.h"
|
||||
|
||||
struct iwch_pd;
|
||||
struct iwch_cq;
|
||||
struct iwch_qp;
|
||||
struct iwch_mr;
|
||||
|
||||
struct iwch_rnic_attributes {
|
||||
u32 max_qps;
|
||||
u32 max_wrs; /* Max for any SQ/RQ */
|
||||
u32 max_sge_per_wr;
|
||||
u32 max_sge_per_rdma_write_wr; /* for RDMA Write WR */
|
||||
u32 max_cqs;
|
||||
u32 max_cqes_per_cq;
|
||||
u32 max_mem_regs;
|
||||
u32 max_phys_buf_entries; /* for phys buf list */
|
||||
u32 max_pds;
|
||||
|
||||
/*
|
||||
* The memory page sizes supported by this RNIC.
|
||||
* Bit position i in bitmap indicates page of
|
||||
* size (4k)^i. Phys block list mode unsupported.
|
||||
*/
|
||||
u32 mem_pgsizes_bitmask;
|
||||
u64 max_mr_size;
|
||||
u8 can_resize_wq;
|
||||
|
||||
/*
|
||||
* The maximum number of RDMA Reads that can be outstanding
|
||||
* per QP with this RNIC as the target.
|
||||
*/
|
||||
u32 max_rdma_reads_per_qp;
|
||||
|
||||
/*
|
||||
* The maximum number of resources used for RDMA Reads
|
||||
* by this RNIC with this RNIC as the target.
|
||||
*/
|
||||
u32 max_rdma_read_resources;
|
||||
|
||||
/*
|
||||
* The max depth per QP for initiation of RDMA Read
|
||||
* by this RNIC.
|
||||
*/
|
||||
u32 max_rdma_read_qp_depth;
|
||||
|
||||
/*
|
||||
* The maximum depth for initiation of RDMA Read
|
||||
* operations by this RNIC on all QPs
|
||||
*/
|
||||
u32 max_rdma_read_depth;
|
||||
u8 rq_overflow_handled;
|
||||
u32 can_modify_ird;
|
||||
u32 can_modify_ord;
|
||||
u32 max_mem_windows;
|
||||
u32 stag0_value;
|
||||
u8 zbva_support;
|
||||
u8 local_invalidate_fence;
|
||||
u32 cq_overflow_detection;
|
||||
};
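/*
 * Editor's illustrative sketch, not part of the original header: a minimal
 * walk over mem_pgsizes_bitmask.  Each set bit advertises one supported
 * page size; the exact size encoding follows the "(4k)^i" convention
 * described in the field's comment above, so only the bit positions are
 * reported here.
 */
static inline void example_dump_pgsize_bits(u32 mask)
{
	int i;

	for (i = 0; i < 32; i++)
		if (mask & (1U << i))
			pr_debug("page-size bit %d advertised\n", i);
}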
|
||||
|
||||
struct iwch_dev {
|
||||
struct ib_device ibdev;
|
||||
struct cxio_rdev rdev;
|
||||
u32 device_cap_flags;
|
||||
struct iwch_rnic_attributes attr;
|
||||
struct xarray cqs;
|
||||
struct xarray qps;
|
||||
struct xarray mrs;
|
||||
struct list_head entry;
|
||||
struct delayed_work db_drop_task;
|
||||
};
|
||||
|
||||
static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
|
||||
{
|
||||
return container_of(ibdev, struct iwch_dev, ibdev);
|
||||
}
|
||||
|
||||
static inline struct iwch_dev *rdev_to_iwch_dev(struct cxio_rdev *rdev)
|
||||
{
|
||||
return container_of(rdev, struct iwch_dev, rdev);
|
||||
}
|
||||
|
||||
static inline int t3b_device(const struct iwch_dev *rhp)
|
||||
{
|
||||
return rhp->rdev.t3cdev_p->type == T3B;
|
||||
}
|
||||
|
||||
static inline int t3a_device(const struct iwch_dev *rhp)
|
||||
{
|
||||
return rhp->rdev.t3cdev_p->type == T3A;
|
||||
}
|
||||
|
||||
static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid)
|
||||
{
|
||||
return xa_load(&rhp->cqs, cqid);
|
||||
}
|
||||
|
||||
static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
|
||||
{
|
||||
return xa_load(&rhp->qps, qpid);
|
||||
}
|
||||
|
||||
static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
|
||||
{
|
||||
return xa_load(&rhp->mrs, mmid);
|
||||
}
|
||||
|
||||
extern struct cxgb3_client t3c_client;
|
||||
extern cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
|
||||
extern void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb);
|
||||
|
||||
#endif
|
File diff suppressed because it is too large
@ -1,233 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#ifndef _IWCH_CM_H_
|
||||
#define _IWCH_CM_H_
|
||||
|
||||
#include <linux/inet.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/kref.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/iw_cm.h>
|
||||
|
||||
#include "cxgb3_offload.h"
|
||||
#include "iwch_provider.h"
|
||||
|
||||
#define MPA_KEY_REQ "MPA ID Req Frame"
|
||||
#define MPA_KEY_REP "MPA ID Rep Frame"
|
||||
|
||||
#define MPA_MAX_PRIVATE_DATA 256
|
||||
#define MPA_REV 0 /* XXX - amso1100 uses rev 0 ! */
|
||||
#define MPA_REJECT 0x20
|
||||
#define MPA_CRC 0x40
|
||||
#define MPA_MARKERS 0x80
|
||||
#define MPA_FLAGS_MASK 0xE0
|
||||
|
||||
#define put_ep(ep) { \
|
||||
pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n", \
|
||||
__func__, __LINE__, ep, kref_read(&((ep)->kref))); \
|
||||
WARN_ON(kref_read(&((ep)->kref)) < 1); \
|
||||
kref_put(&((ep)->kref), __free_ep); \
|
||||
}
|
||||
|
||||
#define get_ep(ep) { \
|
||||
pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n", \
|
||||
__func__, __LINE__, ep, kref_read(&((ep)->kref))); \
|
||||
kref_get(&((ep)->kref)); \
|
||||
}
|
||||
|
||||
struct mpa_message {
|
||||
u8 key[16];
|
||||
u8 flags;
|
||||
u8 revision;
|
||||
__be16 private_data_size;
|
||||
u8 private_data[0];
|
||||
};
|
||||
|
||||
struct terminate_message {
|
||||
u8 layer_etype;
|
||||
u8 ecode;
|
||||
__be16 hdrct_rsvd;
|
||||
u8 len_hdrs[0];
|
||||
};
|
||||
|
||||
#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
|
||||
|
||||
enum iwch_layers_types {
|
||||
LAYER_RDMAP = 0x00,
|
||||
LAYER_DDP = 0x10,
|
||||
LAYER_MPA = 0x20,
|
||||
RDMAP_LOCAL_CATA = 0x00,
|
||||
RDMAP_REMOTE_PROT = 0x01,
|
||||
RDMAP_REMOTE_OP = 0x02,
|
||||
DDP_LOCAL_CATA = 0x00,
|
||||
DDP_TAGGED_ERR = 0x01,
|
||||
DDP_UNTAGGED_ERR = 0x02,
|
||||
DDP_LLP = 0x03
|
||||
};
|
||||
|
||||
enum iwch_rdma_ecodes {
|
||||
RDMAP_INV_STAG = 0x00,
|
||||
RDMAP_BASE_BOUNDS = 0x01,
|
||||
RDMAP_ACC_VIOL = 0x02,
|
||||
RDMAP_STAG_NOT_ASSOC = 0x03,
|
||||
RDMAP_TO_WRAP = 0x04,
|
||||
RDMAP_INV_VERS = 0x05,
|
||||
RDMAP_INV_OPCODE = 0x06,
|
||||
RDMAP_STREAM_CATA = 0x07,
|
||||
RDMAP_GLOBAL_CATA = 0x08,
|
||||
RDMAP_CANT_INV_STAG = 0x09,
|
||||
RDMAP_UNSPECIFIED = 0xff
|
||||
};
|
||||
|
||||
enum iwch_ddp_ecodes {
|
||||
DDPT_INV_STAG = 0x00,
|
||||
DDPT_BASE_BOUNDS = 0x01,
|
||||
DDPT_STAG_NOT_ASSOC = 0x02,
|
||||
DDPT_TO_WRAP = 0x03,
|
||||
DDPT_INV_VERS = 0x04,
|
||||
DDPU_INV_QN = 0x01,
|
||||
DDPU_INV_MSN_NOBUF = 0x02,
|
||||
DDPU_INV_MSN_RANGE = 0x03,
|
||||
DDPU_INV_MO = 0x04,
|
||||
DDPU_MSG_TOOBIG = 0x05,
|
||||
DDPU_INV_VERS = 0x06
|
||||
};
|
||||
|
||||
enum iwch_mpa_ecodes {
|
||||
MPA_CRC_ERR = 0x02,
|
||||
MPA_MARKER_ERR = 0x03
|
||||
};
|
||||
|
||||
enum iwch_ep_state {
|
||||
IDLE = 0,
|
||||
LISTEN,
|
||||
CONNECTING,
|
||||
MPA_REQ_WAIT,
|
||||
MPA_REQ_SENT,
|
||||
MPA_REQ_RCVD,
|
||||
MPA_REP_SENT,
|
||||
FPDU_MODE,
|
||||
ABORTING,
|
||||
CLOSING,
|
||||
MORIBUND,
|
||||
DEAD,
|
||||
};
|
||||
|
||||
enum iwch_ep_flags {
|
||||
PEER_ABORT_IN_PROGRESS = 0,
|
||||
ABORT_REQ_IN_PROGRESS = 1,
|
||||
RELEASE_RESOURCES = 2,
|
||||
CLOSE_SENT = 3,
|
||||
};
|
||||
|
||||
struct iwch_ep_common {
|
||||
struct iw_cm_id *cm_id;
|
||||
struct iwch_qp *qp;
|
||||
struct t3cdev *tdev;
|
||||
enum iwch_ep_state state;
|
||||
struct kref kref;
|
||||
spinlock_t lock;
|
||||
struct sockaddr_in local_addr;
|
||||
struct sockaddr_in remote_addr;
|
||||
wait_queue_head_t waitq;
|
||||
int rpl_done;
|
||||
int rpl_err;
|
||||
unsigned long flags;
|
||||
};
|
||||
|
||||
struct iwch_listen_ep {
|
||||
struct iwch_ep_common com;
|
||||
unsigned int stid;
|
||||
int backlog;
|
||||
};
|
||||
|
||||
struct iwch_ep {
|
||||
struct iwch_ep_common com;
|
||||
struct iwch_ep *parent_ep;
|
||||
struct timer_list timer;
|
||||
unsigned int atid;
|
||||
u32 hwtid;
|
||||
u32 snd_seq;
|
||||
u32 rcv_seq;
|
||||
struct l2t_entry *l2t;
|
||||
struct dst_entry *dst;
|
||||
struct sk_buff *mpa_skb;
|
||||
struct iwch_mpa_attributes mpa_attr;
|
||||
unsigned int mpa_pkt_len;
|
||||
u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
|
||||
u8 tos;
|
||||
u16 emss;
|
||||
u16 plen;
|
||||
u32 ird;
|
||||
u32 ord;
|
||||
};
|
||||
|
||||
static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)
|
||||
{
|
||||
return cm_id->provider_data;
|
||||
}
|
||||
|
||||
static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
|
||||
{
|
||||
return cm_id->provider_data;
|
||||
}
|
||||
|
||||
static inline int compute_wscale(int win)
|
||||
{
|
||||
int wscale = 0;
|
||||
|
||||
while (wscale < 14 && (65535<<wscale) < win)
|
||||
wscale++;
|
||||
return wscale;
|
||||
}
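/*
 * Editor's worked example, not part of the original header: for a 256 KiB
 * window, 65535 << 2 == 262140 is still smaller than 262144, so the loop
 * above runs once more and compute_wscale() returns 3.
 */
static inline void example_compute_wscale_usage(void)
{
	int wscale = compute_wscale(256 * 1024);	/* == 3 */

	(void)wscale;	/* sketch only; silence unused-variable warnings */
}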
|
||||
|
||||
/* CM prototypes */
|
||||
|
||||
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
|
||||
int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
|
||||
int iwch_destroy_listen(struct iw_cm_id *cm_id);
|
||||
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
|
||||
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
|
||||
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp);
|
||||
int iwch_quiesce_tid(struct iwch_ep *ep);
|
||||
int iwch_resume_tid(struct iwch_ep *ep);
|
||||
void __free_ep(struct kref *kref);
|
||||
void iwch_rearp(struct iwch_ep *ep);
|
||||
int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, struct l2t_entry *l2t);
|
||||
|
||||
int __init iwch_cm_init(void);
|
||||
void __exit iwch_cm_term(void);
|
||||
extern int peer2peer;
|
||||
|
||||
#endif /* _IWCH_CM_H_ */
|
@ -1,230 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#include "iwch_provider.h"
|
||||
#include "iwch.h"
|
||||
|
||||
static int __iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
|
||||
struct iwch_qp *qhp, struct ib_wc *wc)
|
||||
{
|
||||
struct t3_wq *wq = qhp ? &qhp->wq : NULL;
|
||||
struct t3_cqe cqe;
|
||||
u32 credit = 0;
|
||||
u8 cqe_flushed;
|
||||
u64 cookie;
|
||||
int ret = 1;
|
||||
|
||||
ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
|
||||
&credit);
|
||||
if (t3a_device(chp->rhp) && credit) {
|
||||
pr_debug("%s updating %d cq credits on id %d\n", __func__,
|
||||
credit, chp->cq.cqid);
|
||||
cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
ret = -EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
ret = 1;
|
||||
|
||||
wc->wr_id = cookie;
|
||||
wc->qp = qhp ? &qhp->ibqp : NULL;
|
||||
wc->vendor_err = CQE_STATUS(cqe);
|
||||
wc->wc_flags = 0;
|
||||
|
||||
pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
|
||||
__func__,
|
||||
CQE_QPID(cqe), CQE_TYPE(cqe),
|
||||
CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
|
||||
CQE_WRID_LOW(cqe), (unsigned long long)cookie);
|
||||
|
||||
if (CQE_TYPE(cqe) == 0) {
|
||||
if (!CQE_STATUS(cqe))
|
||||
wc->byte_len = CQE_LEN(cqe);
|
||||
else
|
||||
wc->byte_len = 0;
|
||||
wc->opcode = IB_WC_RECV;
|
||||
if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
|
||||
CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
|
||||
wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
|
||||
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
|
||||
}
|
||||
} else {
|
||||
switch (CQE_OPCODE(cqe)) {
|
||||
case T3_RDMA_WRITE:
|
||||
wc->opcode = IB_WC_RDMA_WRITE;
|
||||
break;
|
||||
case T3_READ_REQ:
|
||||
wc->opcode = IB_WC_RDMA_READ;
|
||||
wc->byte_len = CQE_LEN(cqe);
|
||||
break;
|
||||
case T3_SEND:
|
||||
case T3_SEND_WITH_SE:
|
||||
case T3_SEND_WITH_INV:
|
||||
case T3_SEND_WITH_SE_INV:
|
||||
wc->opcode = IB_WC_SEND;
|
||||
break;
|
||||
case T3_LOCAL_INV:
|
||||
wc->opcode = IB_WC_LOCAL_INV;
|
||||
break;
|
||||
case T3_FAST_REGISTER:
|
||||
wc->opcode = IB_WC_REG_MR;
|
||||
break;
|
||||
default:
|
||||
pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
|
||||
CQE_OPCODE(cqe), CQE_QPID(cqe));
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
if (cqe_flushed)
|
||||
wc->status = IB_WC_WR_FLUSH_ERR;
|
||||
else {
|
||||
|
||||
switch (CQE_STATUS(cqe)) {
|
||||
case TPT_ERR_SUCCESS:
|
||||
wc->status = IB_WC_SUCCESS;
|
||||
break;
|
||||
case TPT_ERR_STAG:
|
||||
wc->status = IB_WC_LOC_ACCESS_ERR;
|
||||
break;
|
||||
case TPT_ERR_PDID:
|
||||
wc->status = IB_WC_LOC_PROT_ERR;
|
||||
break;
|
||||
case TPT_ERR_QPID:
|
||||
case TPT_ERR_ACCESS:
|
||||
wc->status = IB_WC_LOC_ACCESS_ERR;
|
||||
break;
|
||||
case TPT_ERR_WRAP:
|
||||
wc->status = IB_WC_GENERAL_ERR;
|
||||
break;
|
||||
case TPT_ERR_BOUND:
|
||||
wc->status = IB_WC_LOC_LEN_ERR;
|
||||
break;
|
||||
case TPT_ERR_INVALIDATE_SHARED_MR:
|
||||
case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
|
||||
wc->status = IB_WC_MW_BIND_ERR;
|
||||
break;
|
||||
case TPT_ERR_CRC:
|
||||
case TPT_ERR_MARKER:
|
||||
case TPT_ERR_PDU_LEN_ERR:
|
||||
case TPT_ERR_OUT_OF_RQE:
|
||||
case TPT_ERR_DDP_VERSION:
|
||||
case TPT_ERR_RDMA_VERSION:
|
||||
case TPT_ERR_DDP_QUEUE_NUM:
|
||||
case TPT_ERR_MSN:
|
||||
case TPT_ERR_TBIT:
|
||||
case TPT_ERR_MO:
|
||||
case TPT_ERR_MSN_RANGE:
|
||||
case TPT_ERR_IRD_OVERFLOW:
|
||||
case TPT_ERR_OPCODE:
|
||||
wc->status = IB_WC_FATAL_ERR;
|
||||
break;
|
||||
case TPT_ERR_SWFLUSH:
|
||||
wc->status = IB_WC_WR_FLUSH_ERR;
|
||||
break;
|
||||
default:
|
||||
pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
|
||||
CQE_STATUS(cqe), CQE_QPID(cqe));
|
||||
ret = -EINVAL;
|
||||
}
|
||||
}
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get one cq entry from cxio and map it to openib.
|
||||
*
|
||||
* Returns:
|
||||
* 0 EMPTY;
|
||||
* 1 cqe returned
|
||||
* -EAGAIN caller must try again
|
||||
* any other -errno fatal error
|
||||
*/
|
||||
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
|
||||
struct ib_wc *wc)
|
||||
{
|
||||
struct iwch_qp *qhp;
|
||||
struct t3_cqe *rd_cqe;
|
||||
int ret;
|
||||
|
||||
rd_cqe = cxio_next_cqe(&chp->cq);
|
||||
|
||||
if (!rd_cqe)
|
||||
return 0;
|
||||
|
||||
qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
|
||||
if (qhp) {
|
||||
spin_lock(&qhp->lock);
|
||||
ret = __iwch_poll_cq_one(rhp, chp, qhp, wc);
|
||||
spin_unlock(&qhp->lock);
|
||||
} else {
|
||||
ret = __iwch_poll_cq_one(rhp, chp, NULL, wc);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
|
||||
{
|
||||
struct iwch_dev *rhp;
|
||||
struct iwch_cq *chp;
|
||||
unsigned long flags;
|
||||
int npolled;
|
||||
int err = 0;
|
||||
|
||||
chp = to_iwch_cq(ibcq);
|
||||
rhp = chp->rhp;
|
||||
|
||||
spin_lock_irqsave(&chp->lock, flags);
|
||||
for (npolled = 0; npolled < num_entries; ++npolled) {
|
||||
|
||||
/*
|
||||
* Because T3 can post CQEs that are _not_ associated
|
||||
* with a WR, we might have to poll again after removing
|
||||
* one of these.
|
||||
*/
|
||||
do {
|
||||
err = iwch_poll_cq_one(rhp, chp, wc + npolled);
|
||||
} while (err == -EAGAIN);
|
||||
if (err <= 0)
|
||||
break;
|
||||
}
|
||||
spin_unlock_irqrestore(&chp->lock, flags);
|
||||
|
||||
if (err < 0)
|
||||
return err;
|
||||
else {
|
||||
return npolled;
|
||||
}
|
||||
}
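/*
 * Editor's illustrative sketch, not part of the original driver: a ULP
 * normally drains completions through the generic ib_poll_cq() verb, which
 * ends up in iwch_poll_cq() above for this device.  The helper below is
 * hypothetical and only shows the typical consumer-side loop.
 */
static void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0)
		pr_debug("wr_id 0x%llx status %d\n",
			 (unsigned long long)wc.wr_id, wc.status);
}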
|
@ -1,232 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/mman.h>
|
||||
#include <net/sock.h>
|
||||
#include "iwch_provider.h"
|
||||
#include "iwch.h"
|
||||
#include "iwch_cm.h"
|
||||
#include "cxio_hal.h"
|
||||
#include "cxio_wr.h"
|
||||
|
||||
static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
|
||||
struct respQ_msg_t *rsp_msg,
|
||||
enum ib_event_type ib_event,
|
||||
int send_term)
|
||||
{
|
||||
struct ib_event event;
|
||||
struct iwch_qp_attributes attrs;
|
||||
struct iwch_qp *qhp;
|
||||
unsigned long flag;
|
||||
|
||||
xa_lock(&rnicp->qps);
|
||||
qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
|
||||
|
||||
if (!qhp) {
|
||||
pr_err("%s unaffiliated error 0x%x qpid 0x%x\n",
|
||||
__func__, CQE_STATUS(rsp_msg->cqe),
|
||||
CQE_QPID(rsp_msg->cqe));
|
||||
xa_unlock(&rnicp->qps);
|
||||
return;
|
||||
}
|
||||
|
||||
if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
|
||||
(qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
|
||||
pr_debug("%s AE received after RTS - qp state %d qpid 0x%x status 0x%x\n",
|
||||
__func__,
|
||||
qhp->attr.state, qhp->wq.qpid,
|
||||
CQE_STATUS(rsp_msg->cqe));
|
||||
xa_unlock(&rnicp->qps);
|
||||
return;
|
||||
}
|
||||
|
||||
pr_err("%s - AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
|
||||
__func__,
|
||||
CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
|
||||
CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
|
||||
CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
|
||||
|
||||
atomic_inc(&qhp->refcnt);
|
||||
xa_unlock(&rnicp->qps);
|
||||
|
||||
if (qhp->attr.state == IWCH_QP_STATE_RTS) {
|
||||
attrs.next_state = IWCH_QP_STATE_TERMINATE;
|
||||
iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
|
||||
&attrs, 1);
|
||||
if (send_term)
|
||||
iwch_post_terminate(qhp, rsp_msg);
|
||||
}
|
||||
|
||||
event.event = ib_event;
|
||||
event.device = chp->ibcq.device;
|
||||
if (ib_event == IB_EVENT_CQ_ERR)
|
||||
event.element.cq = &chp->ibcq;
|
||||
else
|
||||
event.element.qp = &qhp->ibqp;
|
||||
|
||||
if (qhp->ibqp.event_handler)
|
||||
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
|
||||
|
||||
spin_lock_irqsave(&chp->comp_handler_lock, flag);
|
||||
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
|
||||
|
||||
if (atomic_dec_and_test(&qhp->refcnt))
|
||||
wake_up(&qhp->wait);
|
||||
}
|
||||
|
||||
void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
|
||||
{
|
||||
struct iwch_dev *rnicp;
|
||||
struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
|
||||
struct iwch_cq *chp;
|
||||
struct iwch_qp *qhp;
|
||||
u32 cqid = RSPQ_CQID(rsp_msg);
|
||||
unsigned long flag;
|
||||
|
||||
rnicp = (struct iwch_dev *) rdev_p->ulp;
|
||||
xa_lock(&rnicp->qps);
|
||||
chp = get_chp(rnicp, cqid);
|
||||
qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
|
||||
if (!chp || !qhp) {
|
||||
pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
|
||||
cqid, CQE_QPID(rsp_msg->cqe),
|
||||
CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
|
||||
CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
|
||||
CQE_WRID_LOW(rsp_msg->cqe));
|
||||
xa_unlock(&rnicp->qps);
|
||||
goto out;
|
||||
}
|
||||
iwch_qp_add_ref(&qhp->ibqp);
|
||||
atomic_inc(&chp->refcnt);
|
||||
xa_unlock(&rnicp->qps);
|
||||
|
||||
/*
|
||||
* 1) completion of our sending a TERMINATE.
|
||||
* 2) incoming TERMINATE message.
|
||||
*/
|
||||
if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
|
||||
(CQE_STATUS(rsp_msg->cqe) == 0)) {
|
||||
if (SQ_TYPE(rsp_msg->cqe)) {
|
||||
pr_debug("%s QPID 0x%x ep %p disconnecting\n",
|
||||
__func__, qhp->wq.qpid, qhp->ep);
|
||||
iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
|
||||
} else {
|
||||
pr_debug("%s post REQ_ERR AE QPID 0x%x\n", __func__,
|
||||
qhp->wq.qpid);
|
||||
post_qp_event(rnicp, chp, rsp_msg,
|
||||
IB_EVENT_QP_REQ_ERR, 0);
|
||||
iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
|
||||
}
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* Bad incoming Read request */
|
||||
if (SQ_TYPE(rsp_msg->cqe) &&
|
||||
(CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
|
||||
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* Bad incoming write */
|
||||
if (RQ_TYPE(rsp_msg->cqe) &&
|
||||
(CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
|
||||
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
|
||||
goto done;
|
||||
}
|
||||
|
||||
switch (CQE_STATUS(rsp_msg->cqe)) {
|
||||
|
||||
/* Completion Events */
|
||||
case TPT_ERR_SUCCESS:
|
||||
|
||||
/*
|
||||
* Confirm the destination entry if this is a RECV completion.
|
||||
*/
|
||||
if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
|
||||
dst_confirm(qhp->ep->dst);
|
||||
spin_lock_irqsave(&chp->comp_handler_lock, flag);
|
||||
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
|
||||
break;
|
||||
|
||||
case TPT_ERR_STAG:
|
||||
case TPT_ERR_PDID:
|
||||
case TPT_ERR_QPID:
|
||||
case TPT_ERR_ACCESS:
|
||||
case TPT_ERR_WRAP:
|
||||
case TPT_ERR_BOUND:
|
||||
case TPT_ERR_INVALIDATE_SHARED_MR:
|
||||
case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
|
||||
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
|
||||
break;
|
||||
|
||||
/* Device Fatal Errors */
|
||||
case TPT_ERR_ECC:
|
||||
case TPT_ERR_ECC_PSTAG:
|
||||
case TPT_ERR_INTERNAL_ERR:
|
||||
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
|
||||
break;
|
||||
|
||||
/* QP Fatal Errors */
|
||||
case TPT_ERR_OUT_OF_RQE:
|
||||
case TPT_ERR_PBL_ADDR_BOUND:
|
||||
case TPT_ERR_CRC:
|
||||
case TPT_ERR_MARKER:
|
||||
case TPT_ERR_PDU_LEN_ERR:
|
||||
case TPT_ERR_DDP_VERSION:
|
||||
case TPT_ERR_RDMA_VERSION:
|
||||
case TPT_ERR_OPCODE:
|
||||
case TPT_ERR_DDP_QUEUE_NUM:
|
||||
case TPT_ERR_MSN:
|
||||
case TPT_ERR_TBIT:
|
||||
case TPT_ERR_MO:
|
||||
case TPT_ERR_MSN_GAP:
|
||||
case TPT_ERR_MSN_RANGE:
|
||||
case TPT_ERR_RQE_ADDR_BOUND:
|
||||
case TPT_ERR_IRD_OVERFLOW:
|
||||
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
|
||||
break;
|
||||
|
||||
default:
|
||||
pr_err("Unknown T3 status 0x%x QPID 0x%x\n",
|
||||
CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
|
||||
post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
|
||||
break;
|
||||
}
|
||||
done:
|
||||
if (atomic_dec_and_test(&chp->refcnt))
|
||||
wake_up(&chp->wait);
|
||||
iwch_qp_rem_ref(&qhp->ibqp);
|
||||
out:
|
||||
dev_kfree_skb_irq(skb);
|
||||
}
|
@ -1,101 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#include <linux/slab.h>
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
#include <rdma/iw_cm.h>
|
||||
#include <rdma/ib_verbs.h>
|
||||
|
||||
#include "cxio_hal.h"
|
||||
#include "cxio_resource.h"
|
||||
#include "iwch.h"
|
||||
#include "iwch_provider.h"
|
||||
|
||||
static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
|
||||
{
|
||||
u32 mmid;
|
||||
|
||||
mhp->attr.state = 1;
|
||||
mhp->attr.stag = stag;
|
||||
mmid = stag >> 8;
|
||||
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
|
||||
pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
|
||||
return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
|
||||
}
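/*
 * Editor's note (sketch, not part of the original driver): the low 8 bits
 * of an STag act as a key and the upper 24 bits as the memory-region
 * index, which is why iwch_finish_mem_reg() above derives the xarray index
 * with stag >> 8; get_mhp() in iwch.h looks the MR up again with the same
 * mmid.
 */
static inline u32 example_stag_to_mmid(u32 stag)
{
	return stag >> 8;	/* same derivation as mmid above */
}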
|
||||
|
||||
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
|
||||
struct iwch_mr *mhp, int shift)
|
||||
{
|
||||
u32 stag;
|
||||
int ret;
|
||||
|
||||
if (cxio_register_phys_mem(&rhp->rdev,
|
||||
&stag, mhp->attr.pdid,
|
||||
mhp->attr.perms,
|
||||
mhp->attr.zbva,
|
||||
mhp->attr.va_fbo,
|
||||
mhp->attr.len,
|
||||
shift - 12,
|
||||
mhp->attr.pbl_size, mhp->attr.pbl_addr))
|
||||
return -ENOMEM;
|
||||
|
||||
ret = iwch_finish_mem_reg(mhp, stag);
|
||||
if (ret)
|
||||
cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
|
||||
mhp->attr.pbl_addr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
|
||||
{
|
||||
mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
|
||||
npages << 3);
|
||||
|
||||
if (!mhp->attr.pbl_addr)
|
||||
return -ENOMEM;
|
||||
|
||||
mhp->attr.pbl_size = npages;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void iwch_free_pbl(struct iwch_mr *mhp)
|
||||
{
|
||||
cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
|
||||
mhp->attr.pbl_size << 3);
|
||||
}
|
||||
|
||||
int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
|
||||
{
|
||||
return cxio_write_pbl(&mhp->rhp->rdev, pages,
|
||||
mhp->attr.pbl_addr + (offset << 3), npages);
|
||||
}
|
File diff suppressed because it is too large
@ -1,347 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#ifndef __IWCH_PROVIDER_H__
|
||||
#define __IWCH_PROVIDER_H__
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <asm/types.h>
|
||||
#include "t3cdev.h"
|
||||
#include "iwch.h"
|
||||
#include "cxio_wr.h"
|
||||
#include "cxio_hal.h"
|
||||
|
||||
struct iwch_pd {
|
||||
struct ib_pd ibpd;
|
||||
u32 pdid;
|
||||
struct iwch_dev *rhp;
|
||||
};
|
||||
|
||||
static inline struct iwch_pd *to_iwch_pd(struct ib_pd *ibpd)
|
||||
{
|
||||
return container_of(ibpd, struct iwch_pd, ibpd);
|
||||
}
|
||||
|
||||
struct tpt_attributes {
|
||||
u32 stag;
|
||||
u32 state:1;
|
||||
u32 type:2;
|
||||
u32 rsvd:1;
|
||||
enum tpt_mem_perm perms;
|
||||
u32 remote_invaliate_disable:1;
|
||||
u32 zbva:1;
|
||||
u32 mw_bind_enable:1;
|
||||
u32 page_size:5;
|
||||
|
||||
u32 pdid;
|
||||
u32 qpid;
|
||||
u32 pbl_addr;
|
||||
u32 len;
|
||||
u64 va_fbo;
|
||||
u32 pbl_size;
|
||||
};
|
||||
|
||||
struct iwch_mr {
|
||||
struct ib_mr ibmr;
|
||||
struct ib_umem *umem;
|
||||
struct iwch_dev *rhp;
|
||||
u64 kva;
|
||||
struct tpt_attributes attr;
|
||||
u64 *pages;
|
||||
u32 npages;
|
||||
};
|
||||
|
||||
typedef struct iwch_mw iwch_mw_handle;
|
||||
|
||||
static inline struct iwch_mr *to_iwch_mr(struct ib_mr *ibmr)
|
||||
{
|
||||
return container_of(ibmr, struct iwch_mr, ibmr);
|
||||
}
|
||||
|
||||
struct iwch_mw {
|
||||
struct ib_mw ibmw;
|
||||
struct iwch_dev *rhp;
|
||||
u64 kva;
|
||||
struct tpt_attributes attr;
|
||||
};
|
||||
|
||||
static inline struct iwch_mw *to_iwch_mw(struct ib_mw *ibmw)
|
||||
{
|
||||
return container_of(ibmw, struct iwch_mw, ibmw);
|
||||
}
|
||||
|
||||
struct iwch_cq {
|
||||
struct ib_cq ibcq;
|
||||
struct iwch_dev *rhp;
|
||||
struct t3_cq cq;
|
||||
spinlock_t lock;
|
||||
spinlock_t comp_handler_lock;
|
||||
atomic_t refcnt;
|
||||
wait_queue_head_t wait;
|
||||
u32 __user *user_rptr_addr;
|
||||
};
|
||||
|
||||
static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq)
|
||||
{
|
||||
return container_of(ibcq, struct iwch_cq, ibcq);
|
||||
}
|
||||
|
||||
enum IWCH_QP_FLAGS {
|
||||
QP_QUIESCED = 0x01
|
||||
};
|
||||
|
||||
struct iwch_mpa_attributes {
|
||||
u8 initiator;
|
||||
u8 recv_marker_enabled;
|
||||
u8 xmit_marker_enabled;	/* MPA markers on the transmit side */
|
||||
u8 crc_enabled;
|
||||
u8 version; /* 0 or 1 */
|
||||
};
|
||||
|
||||
struct iwch_qp_attributes {
|
||||
u32 scq;
|
||||
u32 rcq;
|
||||
u32 sq_num_entries;
|
||||
u32 rq_num_entries;
|
||||
u32 sq_max_sges;
|
||||
u32 sq_max_sges_rdma_write;
|
||||
u32 rq_max_sges;
|
||||
u32 state;
|
||||
u8 enable_rdma_read;
|
||||
u8 enable_rdma_write; /* enable inbound Read Resp. */
|
||||
u8 enable_bind;
|
||||
u8 enable_mmid0_fastreg; /* Enable STAG0 + Fast-register */
|
||||
/*
|
||||
* Next QP state.  If the current state is specified, only the
* QP attributes will be modified.
|
||||
*/
|
||||
u32 max_ord;
|
||||
u32 max_ird;
|
||||
u32 pd; /* IN */
|
||||
u32 next_state;
|
||||
char terminate_buffer[52];
|
||||
u32 terminate_msg_len;
|
||||
u8 is_terminate_local;
|
||||
struct iwch_mpa_attributes mpa_attr; /* IN-OUT */
|
||||
struct iwch_ep *llp_stream_handle;
|
||||
char *stream_msg_buf; /* Last stream msg. before Idle -> RTS */
|
||||
u32 stream_msg_buf_len; /* Only on Idle -> RTS */
|
||||
};
|
||||
|
||||
struct iwch_qp {
|
||||
struct ib_qp ibqp;
|
||||
struct iwch_dev *rhp;
|
||||
struct iwch_ep *ep;
|
||||
struct iwch_qp_attributes attr;
|
||||
struct t3_wq wq;
|
||||
spinlock_t lock;
|
||||
atomic_t refcnt;
|
||||
wait_queue_head_t wait;
|
||||
enum IWCH_QP_FLAGS flags;
|
||||
};
|
||||
|
||||
static inline int qp_quiesced(struct iwch_qp *qhp)
|
||||
{
|
||||
return qhp->flags & QP_QUIESCED;
|
||||
}
|
||||
|
||||
static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
|
||||
{
|
||||
return container_of(ibqp, struct iwch_qp, ibqp);
|
||||
}
|
||||
|
||||
void iwch_qp_add_ref(struct ib_qp *qp);
|
||||
void iwch_qp_rem_ref(struct ib_qp *qp);
|
||||
|
||||
struct iwch_ucontext {
|
||||
struct ib_ucontext ibucontext;
|
||||
struct cxio_ucontext uctx;
|
||||
u32 key;
|
||||
spinlock_t mmap_lock;
|
||||
struct list_head mmaps;
|
||||
};
|
||||
|
||||
static inline struct iwch_ucontext *to_iwch_ucontext(struct ib_ucontext *c)
|
||||
{
|
||||
return container_of(c, struct iwch_ucontext, ibucontext);
|
||||
}
|
||||
|
||||
struct iwch_mm_entry {
|
||||
struct list_head entry;
|
||||
u64 addr;
|
||||
u32 key;
|
||||
unsigned len;
|
||||
};
|
||||
|
||||
static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
|
||||
u32 key, unsigned len)
|
||||
{
|
||||
struct list_head *pos, *nxt;
|
||||
struct iwch_mm_entry *mm;
|
||||
|
||||
spin_lock(&ucontext->mmap_lock);
|
||||
list_for_each_safe(pos, nxt, &ucontext->mmaps) {
|
||||
|
||||
mm = list_entry(pos, struct iwch_mm_entry, entry);
|
||||
if (mm->key == key && mm->len == len) {
|
||||
list_del_init(&mm->entry);
|
||||
spin_unlock(&ucontext->mmap_lock);
|
||||
pr_debug("%s key 0x%x addr 0x%llx len %d\n",
|
||||
__func__, key,
|
||||
(unsigned long long)mm->addr, mm->len);
|
||||
return mm;
|
||||
}
|
||||
}
|
||||
spin_unlock(&ucontext->mmap_lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void insert_mmap(struct iwch_ucontext *ucontext,
|
||||
struct iwch_mm_entry *mm)
|
||||
{
|
||||
spin_lock(&ucontext->mmap_lock);
|
||||
pr_debug("%s key 0x%x addr 0x%llx len %d\n",
|
||||
__func__, mm->key, (unsigned long long)mm->addr, mm->len);
|
||||
list_add_tail(&mm->entry, &ucontext->mmaps);
|
||||
spin_unlock(&ucontext->mmap_lock);
|
||||
}
|
||||
|
||||
enum iwch_qp_attr_mask {
|
||||
IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
|
||||
IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
|
||||
IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
|
||||
IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
|
||||
IWCH_QP_ATTR_MAX_ORD = 1 << 11,
|
||||
IWCH_QP_ATTR_MAX_IRD = 1 << 12,
|
||||
IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
|
||||
IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
|
||||
IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
|
||||
IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
|
||||
IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
|
||||
IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
|
||||
IWCH_QP_ATTR_MAX_ORD |
|
||||
IWCH_QP_ATTR_MAX_IRD |
|
||||
IWCH_QP_ATTR_LLP_STREAM_HANDLE |
|
||||
IWCH_QP_ATTR_STREAM_MSG_BUFFER |
|
||||
IWCH_QP_ATTR_MPA_ATTR |
|
||||
IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
|
||||
};
|
||||
|
||||
int iwch_modify_qp(struct iwch_dev *rhp,
|
||||
struct iwch_qp *qhp,
|
||||
enum iwch_qp_attr_mask mask,
|
||||
struct iwch_qp_attributes *attrs,
|
||||
int internal);
|
||||
|
||||
enum iwch_qp_state {
|
||||
IWCH_QP_STATE_IDLE,
|
||||
IWCH_QP_STATE_RTS,
|
||||
IWCH_QP_STATE_ERROR,
|
||||
IWCH_QP_STATE_TERMINATE,
|
||||
IWCH_QP_STATE_CLOSING,
|
||||
IWCH_QP_STATE_TOT
|
||||
};
|
||||
|
||||
static inline int iwch_convert_state(enum ib_qp_state ib_state)
|
||||
{
|
||||
switch (ib_state) {
|
||||
case IB_QPS_RESET:
|
||||
case IB_QPS_INIT:
|
||||
return IWCH_QP_STATE_IDLE;
|
||||
case IB_QPS_RTS:
|
||||
return IWCH_QP_STATE_RTS;
|
||||
case IB_QPS_SQD:
|
||||
return IWCH_QP_STATE_CLOSING;
|
||||
case IB_QPS_SQE:
|
||||
return IWCH_QP_STATE_TERMINATE;
|
||||
case IB_QPS_ERR:
|
||||
return IWCH_QP_STATE_ERROR;
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
static inline u32 iwch_ib_to_tpt_access(int acc)
|
||||
{
|
||||
return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
|
||||
(acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
|
||||
(acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
|
||||
(acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
|
||||
TPT_LOCAL_READ;
|
||||
}
|
||||
|
||||
static inline u32 iwch_ib_to_tpt_bind_access(int acc)
|
||||
{
|
||||
return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
|
||||
(acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0);
|
||||
}
|
||||
|
||||
enum iwch_mmid_state {
|
||||
IWCH_STAG_STATE_VALID,
|
||||
IWCH_STAG_STATE_INVALID
|
||||
};
|
||||
|
||||
enum iwch_qp_query_flags {
|
||||
IWCH_QP_QUERY_CONTEXT_NONE = 0x0, /* No ctx; Only attrs */
|
||||
IWCH_QP_QUERY_CONTEXT_GET = 0x1, /* Get ctx + attrs */
|
||||
IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2, /* Not Supported */
|
||||
|
||||
/*
|
||||
* Quiesce QP context; Consumer
|
||||
* will NOT replay outstanding WR
|
||||
*/
|
||||
IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
|
||||
IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
|
||||
IWCH_QP_QUERY_TEST_USERWRITE = 0x32 /* Test special */
|
||||
};
|
||||
|
||||
u16 iwch_rqes_posted(struct iwch_qp *qhp);
|
||||
int iwch_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
|
||||
const struct ib_send_wr **bad_wr);
|
||||
int iwch_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
|
||||
const struct ib_recv_wr **bad_wr);
|
||||
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
|
||||
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
|
||||
int iwch_post_zb_read(struct iwch_ep *ep);
|
||||
int iwch_register_device(struct iwch_dev *dev);
|
||||
void iwch_unregister_device(struct iwch_dev *dev);
|
||||
void stop_read_rep_timer(struct iwch_qp *qhp);
|
||||
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
|
||||
struct iwch_mr *mhp, int shift);
|
||||
int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
|
||||
void iwch_free_pbl(struct iwch_mr *mhp);
|
||||
int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
|
||||
|
||||
#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"
|
||||
|
||||
#endif
|
File diff suppressed because it is too large
@ -1,632 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2007 Chelsio, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#ifndef _TCB_DEFS_H
|
||||
#define _TCB_DEFS_H
|
||||
|
||||
#define W_TCB_T_STATE 0
|
||||
#define S_TCB_T_STATE 0
|
||||
#define M_TCB_T_STATE 0xfULL
|
||||
#define V_TCB_T_STATE(x) ((x) << S_TCB_T_STATE)
|
||||
|
||||
#define W_TCB_TIMER 0
|
||||
#define S_TCB_TIMER 4
|
||||
#define M_TCB_TIMER 0x1ULL
|
||||
#define V_TCB_TIMER(x) ((x) << S_TCB_TIMER)
|
||||
|
||||
#define W_TCB_DACK_TIMER 0
|
||||
#define S_TCB_DACK_TIMER 5
|
||||
#define M_TCB_DACK_TIMER 0x1ULL
|
||||
#define V_TCB_DACK_TIMER(x) ((x) << S_TCB_DACK_TIMER)
|
||||
|
||||
#define W_TCB_DEL_FLAG 0
|
||||
#define S_TCB_DEL_FLAG 6
|
||||
#define M_TCB_DEL_FLAG 0x1ULL
|
||||
#define V_TCB_DEL_FLAG(x) ((x) << S_TCB_DEL_FLAG)
|
||||
|
||||
#define W_TCB_L2T_IX 0
|
||||
#define S_TCB_L2T_IX 7
|
||||
#define M_TCB_L2T_IX 0x7ffULL
|
||||
#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
|
||||
|
||||
#define W_TCB_SMAC_SEL 0
|
||||
#define S_TCB_SMAC_SEL 18
|
||||
#define M_TCB_SMAC_SEL 0x3ULL
|
||||
#define V_TCB_SMAC_SEL(x) ((x) << S_TCB_SMAC_SEL)
|
||||
|
||||
#define W_TCB_TOS 0
|
||||
#define S_TCB_TOS 20
|
||||
#define M_TCB_TOS 0x3fULL
|
||||
#define V_TCB_TOS(x) ((x) << S_TCB_TOS)
|
||||
|
||||
#define W_TCB_MAX_RT 0
|
||||
#define S_TCB_MAX_RT 26
|
||||
#define M_TCB_MAX_RT 0xfULL
|
||||
#define V_TCB_MAX_RT(x) ((x) << S_TCB_MAX_RT)
|
||||
|
||||
#define W_TCB_T_RXTSHIFT 0
|
||||
#define S_TCB_T_RXTSHIFT 30
|
||||
#define M_TCB_T_RXTSHIFT 0xfULL
|
||||
#define V_TCB_T_RXTSHIFT(x) ((x) << S_TCB_T_RXTSHIFT)
|
||||
|
||||
#define W_TCB_T_DUPACKS 1
|
||||
#define S_TCB_T_DUPACKS 2
|
||||
#define M_TCB_T_DUPACKS 0xfULL
|
||||
#define V_TCB_T_DUPACKS(x) ((x) << S_TCB_T_DUPACKS)
|
||||
|
||||
#define W_TCB_T_MAXSEG 1
|
||||
#define S_TCB_T_MAXSEG 6
|
||||
#define M_TCB_T_MAXSEG 0xfULL
|
||||
#define V_TCB_T_MAXSEG(x) ((x) << S_TCB_T_MAXSEG)
|
||||
|
||||
#define W_TCB_T_FLAGS1 1
|
||||
#define S_TCB_T_FLAGS1 10
|
||||
#define M_TCB_T_FLAGS1 0xffffffffULL
|
||||
#define V_TCB_T_FLAGS1(x) ((x) << S_TCB_T_FLAGS1)
|
||||
|
||||
#define W_TCB_T_MIGRATION 1
|
||||
#define S_TCB_T_MIGRATION 20
|
||||
#define M_TCB_T_MIGRATION 0x1ULL
|
||||
#define V_TCB_T_MIGRATION(x) ((x) << S_TCB_T_MIGRATION)
|
||||
|
||||
#define W_TCB_T_FLAGS2 2
|
||||
#define S_TCB_T_FLAGS2 10
|
||||
#define M_TCB_T_FLAGS2 0x7fULL
|
||||
#define V_TCB_T_FLAGS2(x) ((x) << S_TCB_T_FLAGS2)
|
||||
|
||||
#define W_TCB_SND_SCALE 2
|
||||
#define S_TCB_SND_SCALE 17
|
||||
#define M_TCB_SND_SCALE 0xfULL
|
||||
#define V_TCB_SND_SCALE(x) ((x) << S_TCB_SND_SCALE)
|
||||
|
||||
#define W_TCB_RCV_SCALE 2
|
||||
#define S_TCB_RCV_SCALE 21
|
||||
#define M_TCB_RCV_SCALE 0xfULL
|
||||
#define V_TCB_RCV_SCALE(x) ((x) << S_TCB_RCV_SCALE)
|
||||
|
||||
#define W_TCB_SND_UNA_RAW 2
|
||||
#define S_TCB_SND_UNA_RAW 25
|
||||
#define M_TCB_SND_UNA_RAW 0x7ffffffULL
|
||||
#define V_TCB_SND_UNA_RAW(x) ((x) << S_TCB_SND_UNA_RAW)
|
||||
|
||||
#define W_TCB_SND_NXT_RAW 3
|
||||
#define S_TCB_SND_NXT_RAW 20
|
||||
#define M_TCB_SND_NXT_RAW 0x7ffffffULL
|
||||
#define V_TCB_SND_NXT_RAW(x) ((x) << S_TCB_SND_NXT_RAW)
|
||||
|
||||
#define W_TCB_RCV_NXT 4
|
||||
#define S_TCB_RCV_NXT 15
|
||||
#define M_TCB_RCV_NXT 0xffffffffULL
|
||||
#define V_TCB_RCV_NXT(x) ((x) << S_TCB_RCV_NXT)
|
||||
|
||||
#define W_TCB_RCV_ADV 5
|
||||
#define S_TCB_RCV_ADV 15
|
||||
#define M_TCB_RCV_ADV 0xffffULL
|
||||
#define V_TCB_RCV_ADV(x) ((x) << S_TCB_RCV_ADV)
|
||||
|
||||
#define W_TCB_SND_MAX_RAW 5
|
||||
#define S_TCB_SND_MAX_RAW 31
|
||||
#define M_TCB_SND_MAX_RAW 0x7ffffffULL
|
||||
#define V_TCB_SND_MAX_RAW(x) ((x) << S_TCB_SND_MAX_RAW)
|
||||
|
||||
#define W_TCB_SND_CWND 6
|
||||
#define S_TCB_SND_CWND 26
|
||||
#define M_TCB_SND_CWND 0x7ffffffULL
|
||||
#define V_TCB_SND_CWND(x) ((x) << S_TCB_SND_CWND)
|
||||
|
||||
#define W_TCB_SND_SSTHRESH 7
|
||||
#define S_TCB_SND_SSTHRESH 21
|
||||
#define M_TCB_SND_SSTHRESH 0x7ffffffULL
|
||||
#define V_TCB_SND_SSTHRESH(x) ((x) << S_TCB_SND_SSTHRESH)
|
||||
|
||||
#define W_TCB_T_RTT_TS_RECENT_AGE 8
|
||||
#define S_TCB_T_RTT_TS_RECENT_AGE 16
|
||||
#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
|
||||
#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
|
||||
|
||||
#define W_TCB_T_RTSEQ_RECENT 9
|
||||
#define S_TCB_T_RTSEQ_RECENT 16
|
||||
#define M_TCB_T_RTSEQ_RECENT 0xffffffffULL
|
||||
#define V_TCB_T_RTSEQ_RECENT(x) ((x) << S_TCB_T_RTSEQ_RECENT)
|
||||
|
||||
#define W_TCB_T_SRTT 10
|
||||
#define S_TCB_T_SRTT 16
|
||||
#define M_TCB_T_SRTT 0xffffULL
|
||||
#define V_TCB_T_SRTT(x) ((x) << S_TCB_T_SRTT)
|
||||
|
||||
#define W_TCB_T_RTTVAR 11
|
||||
#define S_TCB_T_RTTVAR 0
|
||||
#define M_TCB_T_RTTVAR 0xffffULL
|
||||
#define V_TCB_T_RTTVAR(x) ((x) << S_TCB_T_RTTVAR)
|
||||
|
||||
#define W_TCB_TS_LAST_ACK_SENT_RAW 11
|
||||
#define S_TCB_TS_LAST_ACK_SENT_RAW 16
|
||||
#define M_TCB_TS_LAST_ACK_SENT_RAW 0x7ffffffULL
|
||||
#define V_TCB_TS_LAST_ACK_SENT_RAW(x) ((x) << S_TCB_TS_LAST_ACK_SENT_RAW)
|
||||
|
||||
#define W_TCB_DIP 12
|
||||
#define S_TCB_DIP 11
|
||||
#define M_TCB_DIP 0xffffffffULL
|
||||
#define V_TCB_DIP(x) ((x) << S_TCB_DIP)
|
||||
|
||||
#define W_TCB_SIP 13
|
||||
#define S_TCB_SIP 11
|
||||
#define M_TCB_SIP 0xffffffffULL
|
||||
#define V_TCB_SIP(x) ((x) << S_TCB_SIP)
|
||||
|
||||
#define W_TCB_DP 14
|
||||
#define S_TCB_DP 11
|
||||
#define M_TCB_DP 0xffffULL
|
||||
#define V_TCB_DP(x) ((x) << S_TCB_DP)
|
||||
|
||||
#define W_TCB_SP 14
|
||||
#define S_TCB_SP 27
|
||||
#define M_TCB_SP 0xffffULL
|
||||
#define V_TCB_SP(x) ((x) << S_TCB_SP)
|
||||
|
||||
#define W_TCB_TIMESTAMP 15
|
||||
#define S_TCB_TIMESTAMP 11
|
||||
#define M_TCB_TIMESTAMP 0xffffffffULL
|
||||
#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
|
||||
|
||||
#define W_TCB_TIMESTAMP_OFFSET 16
|
||||
#define S_TCB_TIMESTAMP_OFFSET 11
|
||||
#define M_TCB_TIMESTAMP_OFFSET 0xfULL
|
||||
#define V_TCB_TIMESTAMP_OFFSET(x) ((x) << S_TCB_TIMESTAMP_OFFSET)
|
||||
|
||||
#define W_TCB_TX_MAX 16
|
||||
#define S_TCB_TX_MAX 15
|
||||
#define M_TCB_TX_MAX 0xffffffffULL
|
||||
#define V_TCB_TX_MAX(x) ((x) << S_TCB_TX_MAX)
|
||||
|
||||
#define W_TCB_TX_HDR_PTR_RAW 17
|
||||
#define S_TCB_TX_HDR_PTR_RAW 15
|
||||
#define M_TCB_TX_HDR_PTR_RAW 0x1ffffULL
|
||||
#define V_TCB_TX_HDR_PTR_RAW(x) ((x) << S_TCB_TX_HDR_PTR_RAW)
|
||||
|
||||
#define W_TCB_TX_LAST_PTR_RAW 18
|
||||
#define S_TCB_TX_LAST_PTR_RAW 0
|
||||
#define M_TCB_TX_LAST_PTR_RAW 0x1ffffULL
|
||||
#define V_TCB_TX_LAST_PTR_RAW(x) ((x) << S_TCB_TX_LAST_PTR_RAW)
|
||||
|
||||
#define W_TCB_TX_COMPACT 18
|
||||
#define S_TCB_TX_COMPACT 17
|
||||
#define M_TCB_TX_COMPACT 0x1ULL
|
||||
#define V_TCB_TX_COMPACT(x) ((x) << S_TCB_TX_COMPACT)
|
||||
|
||||
#define W_TCB_RX_COMPACT 18
|
||||
#define S_TCB_RX_COMPACT 18
|
||||
#define M_TCB_RX_COMPACT 0x1ULL
|
||||
#define V_TCB_RX_COMPACT(x) ((x) << S_TCB_RX_COMPACT)
|
||||
|
||||
#define W_TCB_RCV_WND 18
|
||||
#define S_TCB_RCV_WND 19
|
||||
#define M_TCB_RCV_WND 0x7ffffffULL
|
||||
#define V_TCB_RCV_WND(x) ((x) << S_TCB_RCV_WND)

#define W_TCB_RX_HDR_OFFSET 19
#define S_TCB_RX_HDR_OFFSET 14
#define M_TCB_RX_HDR_OFFSET 0x7ffffffULL
#define V_TCB_RX_HDR_OFFSET(x) ((x) << S_TCB_RX_HDR_OFFSET)

#define W_TCB_RX_FRAG0_START_IDX_RAW 20
#define S_TCB_RX_FRAG0_START_IDX_RAW 9
#define M_TCB_RX_FRAG0_START_IDX_RAW 0x7ffffffULL
#define V_TCB_RX_FRAG0_START_IDX_RAW(x) ((x) << S_TCB_RX_FRAG0_START_IDX_RAW)

#define W_TCB_RX_FRAG1_START_IDX_OFFSET 21
#define S_TCB_RX_FRAG1_START_IDX_OFFSET 4
#define M_TCB_RX_FRAG1_START_IDX_OFFSET 0x7ffffffULL
#define V_TCB_RX_FRAG1_START_IDX_OFFSET(x) ((x) << S_TCB_RX_FRAG1_START_IDX_OFFSET)

#define W_TCB_RX_FRAG0_LEN 21
#define S_TCB_RX_FRAG0_LEN 31
#define M_TCB_RX_FRAG0_LEN 0x7ffffffULL
#define V_TCB_RX_FRAG0_LEN(x) ((x) << S_TCB_RX_FRAG0_LEN)

#define W_TCB_RX_FRAG1_LEN 22
#define S_TCB_RX_FRAG1_LEN 26
#define M_TCB_RX_FRAG1_LEN 0x7ffffffULL
#define V_TCB_RX_FRAG1_LEN(x) ((x) << S_TCB_RX_FRAG1_LEN)

#define W_TCB_NEWRENO_RECOVER 23
#define S_TCB_NEWRENO_RECOVER 21
#define M_TCB_NEWRENO_RECOVER 0x7ffffffULL
#define V_TCB_NEWRENO_RECOVER(x) ((x) << S_TCB_NEWRENO_RECOVER)

#define W_TCB_PDU_HAVE_LEN 24
#define S_TCB_PDU_HAVE_LEN 16
#define M_TCB_PDU_HAVE_LEN 0x1ULL
#define V_TCB_PDU_HAVE_LEN(x) ((x) << S_TCB_PDU_HAVE_LEN)

#define W_TCB_PDU_LEN 24
#define S_TCB_PDU_LEN 17
#define M_TCB_PDU_LEN 0xffffULL
#define V_TCB_PDU_LEN(x) ((x) << S_TCB_PDU_LEN)

#define W_TCB_RX_QUIESCE 25
#define S_TCB_RX_QUIESCE 1
#define M_TCB_RX_QUIESCE 0x1ULL
#define V_TCB_RX_QUIESCE(x) ((x) << S_TCB_RX_QUIESCE)

#define W_TCB_RX_PTR_RAW 25
#define S_TCB_RX_PTR_RAW 2
#define M_TCB_RX_PTR_RAW 0x1ffffULL
#define V_TCB_RX_PTR_RAW(x) ((x) << S_TCB_RX_PTR_RAW)

#define W_TCB_CPU_NO 25
#define S_TCB_CPU_NO 19
#define M_TCB_CPU_NO 0x7fULL
#define V_TCB_CPU_NO(x) ((x) << S_TCB_CPU_NO)

#define W_TCB_ULP_TYPE 25
#define S_TCB_ULP_TYPE 26
#define M_TCB_ULP_TYPE 0xfULL
#define V_TCB_ULP_TYPE(x) ((x) << S_TCB_ULP_TYPE)

#define W_TCB_RX_FRAG1_PTR_RAW 25
#define S_TCB_RX_FRAG1_PTR_RAW 30
#define M_TCB_RX_FRAG1_PTR_RAW 0x1ffffULL
#define V_TCB_RX_FRAG1_PTR_RAW(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW)

#define W_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 26
#define S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 15
#define M_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 0x7ffffffULL
#define V_TCB_RX_FRAG2_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW)

#define W_TCB_RX_FRAG2_PTR_RAW 27
#define S_TCB_RX_FRAG2_PTR_RAW 10
#define M_TCB_RX_FRAG2_PTR_RAW 0x1ffffULL
#define V_TCB_RX_FRAG2_PTR_RAW(x) ((x) << S_TCB_RX_FRAG2_PTR_RAW)

#define W_TCB_RX_FRAG2_LEN_RAW 27
#define S_TCB_RX_FRAG2_LEN_RAW 27
#define M_TCB_RX_FRAG2_LEN_RAW 0x7ffffffULL
#define V_TCB_RX_FRAG2_LEN_RAW(x) ((x) << S_TCB_RX_FRAG2_LEN_RAW)

#define W_TCB_RX_FRAG3_PTR_RAW 28
#define S_TCB_RX_FRAG3_PTR_RAW 22
#define M_TCB_RX_FRAG3_PTR_RAW 0x1ffffULL
#define V_TCB_RX_FRAG3_PTR_RAW(x) ((x) << S_TCB_RX_FRAG3_PTR_RAW)

#define W_TCB_RX_FRAG3_LEN_RAW 29
#define S_TCB_RX_FRAG3_LEN_RAW 7
#define M_TCB_RX_FRAG3_LEN_RAW 0x7ffffffULL
#define V_TCB_RX_FRAG3_LEN_RAW(x) ((x) << S_TCB_RX_FRAG3_LEN_RAW)

#define W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 30
#define S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 2
#define M_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 0x7ffffffULL
#define V_TCB_RX_FRAG3_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW)

#define W_TCB_PDU_HDR_LEN 30
#define S_TCB_PDU_HDR_LEN 29
#define M_TCB_PDU_HDR_LEN 0xffULL
#define V_TCB_PDU_HDR_LEN(x) ((x) << S_TCB_PDU_HDR_LEN)

#define W_TCB_SLUSH1 31
#define S_TCB_SLUSH1 5
#define M_TCB_SLUSH1 0x7ffffULL
#define V_TCB_SLUSH1(x) ((x) << S_TCB_SLUSH1)

#define W_TCB_ULP_RAW 31
#define S_TCB_ULP_RAW 24
#define M_TCB_ULP_RAW 0xffULL
#define V_TCB_ULP_RAW(x) ((x) << S_TCB_ULP_RAW)

#define W_TCB_DDP_RDMAP_VERSION 25
#define S_TCB_DDP_RDMAP_VERSION 30
#define M_TCB_DDP_RDMAP_VERSION 0x1ULL
#define V_TCB_DDP_RDMAP_VERSION(x) ((x) << S_TCB_DDP_RDMAP_VERSION)

#define W_TCB_MARKER_ENABLE_RX 25
#define S_TCB_MARKER_ENABLE_RX 31
#define M_TCB_MARKER_ENABLE_RX 0x1ULL
#define V_TCB_MARKER_ENABLE_RX(x) ((x) << S_TCB_MARKER_ENABLE_RX)

#define W_TCB_MARKER_ENABLE_TX 26
#define S_TCB_MARKER_ENABLE_TX 0
#define M_TCB_MARKER_ENABLE_TX 0x1ULL
#define V_TCB_MARKER_ENABLE_TX(x) ((x) << S_TCB_MARKER_ENABLE_TX)

#define W_TCB_CRC_ENABLE 26
#define S_TCB_CRC_ENABLE 1
#define M_TCB_CRC_ENABLE 0x1ULL
#define V_TCB_CRC_ENABLE(x) ((x) << S_TCB_CRC_ENABLE)

#define W_TCB_IRS_ULP 26
#define S_TCB_IRS_ULP 2
#define M_TCB_IRS_ULP 0x1ffULL
#define V_TCB_IRS_ULP(x) ((x) << S_TCB_IRS_ULP)

#define W_TCB_ISS_ULP 26
#define S_TCB_ISS_ULP 11
#define M_TCB_ISS_ULP 0x1ffULL
#define V_TCB_ISS_ULP(x) ((x) << S_TCB_ISS_ULP)

#define W_TCB_TX_PDU_LEN 26
#define S_TCB_TX_PDU_LEN 20
#define M_TCB_TX_PDU_LEN 0x3fffULL
#define V_TCB_TX_PDU_LEN(x) ((x) << S_TCB_TX_PDU_LEN)

#define W_TCB_TX_PDU_OUT 27
#define S_TCB_TX_PDU_OUT 2
#define M_TCB_TX_PDU_OUT 0x1ULL
#define V_TCB_TX_PDU_OUT(x) ((x) << S_TCB_TX_PDU_OUT)

#define W_TCB_CQ_IDX_SQ 27
#define S_TCB_CQ_IDX_SQ 3
#define M_TCB_CQ_IDX_SQ 0xffffULL
#define V_TCB_CQ_IDX_SQ(x) ((x) << S_TCB_CQ_IDX_SQ)

#define W_TCB_CQ_IDX_RQ 27
#define S_TCB_CQ_IDX_RQ 19
#define M_TCB_CQ_IDX_RQ 0xffffULL
#define V_TCB_CQ_IDX_RQ(x) ((x) << S_TCB_CQ_IDX_RQ)

#define W_TCB_QP_ID 28
#define S_TCB_QP_ID 3
#define M_TCB_QP_ID 0xffffULL
#define V_TCB_QP_ID(x) ((x) << S_TCB_QP_ID)

#define W_TCB_PD_ID 28
#define S_TCB_PD_ID 19
#define M_TCB_PD_ID 0xffffULL
#define V_TCB_PD_ID(x) ((x) << S_TCB_PD_ID)

#define W_TCB_STAG 29
#define S_TCB_STAG 3
#define M_TCB_STAG 0xffffffffULL
#define V_TCB_STAG(x) ((x) << S_TCB_STAG)

#define W_TCB_RQ_START 30
#define S_TCB_RQ_START 3
#define M_TCB_RQ_START 0x3ffffffULL
#define V_TCB_RQ_START(x) ((x) << S_TCB_RQ_START)

#define W_TCB_RQ_MSN 30
#define S_TCB_RQ_MSN 29
#define M_TCB_RQ_MSN 0x3ffULL
#define V_TCB_RQ_MSN(x) ((x) << S_TCB_RQ_MSN)

#define W_TCB_RQ_MAX_OFFSET 31
#define S_TCB_RQ_MAX_OFFSET 7
#define M_TCB_RQ_MAX_OFFSET 0xfULL
#define V_TCB_RQ_MAX_OFFSET(x) ((x) << S_TCB_RQ_MAX_OFFSET)

#define W_TCB_RQ_WRITE_PTR 31
#define S_TCB_RQ_WRITE_PTR 11
#define M_TCB_RQ_WRITE_PTR 0x3ffULL
#define V_TCB_RQ_WRITE_PTR(x) ((x) << S_TCB_RQ_WRITE_PTR)

#define W_TCB_INB_WRITE_PERM 31
#define S_TCB_INB_WRITE_PERM 21
#define M_TCB_INB_WRITE_PERM 0x1ULL
#define V_TCB_INB_WRITE_PERM(x) ((x) << S_TCB_INB_WRITE_PERM)

#define W_TCB_INB_READ_PERM 31
#define S_TCB_INB_READ_PERM 22
#define M_TCB_INB_READ_PERM 0x1ULL
#define V_TCB_INB_READ_PERM(x) ((x) << S_TCB_INB_READ_PERM)

#define W_TCB_ORD_L_BIT_VLD 31
#define S_TCB_ORD_L_BIT_VLD 23
#define M_TCB_ORD_L_BIT_VLD 0x1ULL
#define V_TCB_ORD_L_BIT_VLD(x) ((x) << S_TCB_ORD_L_BIT_VLD)

#define W_TCB_RDMAP_OPCODE 31
#define S_TCB_RDMAP_OPCODE 24
#define M_TCB_RDMAP_OPCODE 0xfULL
#define V_TCB_RDMAP_OPCODE(x) ((x) << S_TCB_RDMAP_OPCODE)

#define W_TCB_TX_FLUSH 31
#define S_TCB_TX_FLUSH 28
#define M_TCB_TX_FLUSH 0x1ULL
#define V_TCB_TX_FLUSH(x) ((x) << S_TCB_TX_FLUSH)

#define W_TCB_TX_OOS_RXMT 31
#define S_TCB_TX_OOS_RXMT 29
#define M_TCB_TX_OOS_RXMT 0x1ULL
#define V_TCB_TX_OOS_RXMT(x) ((x) << S_TCB_TX_OOS_RXMT)

#define W_TCB_TX_OOS_TXMT 31
#define S_TCB_TX_OOS_TXMT 30
#define M_TCB_TX_OOS_TXMT 0x1ULL
#define V_TCB_TX_OOS_TXMT(x) ((x) << S_TCB_TX_OOS_TXMT)

#define W_TCB_SLUSH_AUX2 31
#define S_TCB_SLUSH_AUX2 31
#define M_TCB_SLUSH_AUX2 0x1ULL
#define V_TCB_SLUSH_AUX2(x) ((x) << S_TCB_SLUSH_AUX2)

#define W_TCB_RX_FRAG1_PTR_RAW2 25
#define S_TCB_RX_FRAG1_PTR_RAW2 30
#define M_TCB_RX_FRAG1_PTR_RAW2 0x1ffffULL
#define V_TCB_RX_FRAG1_PTR_RAW2(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW2)

#define W_TCB_RX_DDP_FLAGS 26
#define S_TCB_RX_DDP_FLAGS 15
#define M_TCB_RX_DDP_FLAGS 0x3ffULL
#define V_TCB_RX_DDP_FLAGS(x) ((x) << S_TCB_RX_DDP_FLAGS)

#define W_TCB_SLUSH_AUX3 26
#define S_TCB_SLUSH_AUX3 31
#define M_TCB_SLUSH_AUX3 0x1ffULL
#define V_TCB_SLUSH_AUX3(x) ((x) << S_TCB_SLUSH_AUX3)

#define W_TCB_RX_DDP_BUF0_OFFSET 27
#define S_TCB_RX_DDP_BUF0_OFFSET 8
#define M_TCB_RX_DDP_BUF0_OFFSET 0x3fffffULL
#define V_TCB_RX_DDP_BUF0_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF0_OFFSET)

#define W_TCB_RX_DDP_BUF0_LEN 27
#define S_TCB_RX_DDP_BUF0_LEN 30
#define M_TCB_RX_DDP_BUF0_LEN 0x3fffffULL
#define V_TCB_RX_DDP_BUF0_LEN(x) ((x) << S_TCB_RX_DDP_BUF0_LEN)

#define W_TCB_RX_DDP_BUF1_OFFSET 28
#define S_TCB_RX_DDP_BUF1_OFFSET 20
#define M_TCB_RX_DDP_BUF1_OFFSET 0x3fffffULL
#define V_TCB_RX_DDP_BUF1_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF1_OFFSET)

#define W_TCB_RX_DDP_BUF1_LEN 29
#define S_TCB_RX_DDP_BUF1_LEN 10
#define M_TCB_RX_DDP_BUF1_LEN 0x3fffffULL
#define V_TCB_RX_DDP_BUF1_LEN(x) ((x) << S_TCB_RX_DDP_BUF1_LEN)

#define W_TCB_RX_DDP_BUF0_TAG 30
#define S_TCB_RX_DDP_BUF0_TAG 0
#define M_TCB_RX_DDP_BUF0_TAG 0xffffffffULL
#define V_TCB_RX_DDP_BUF0_TAG(x) ((x) << S_TCB_RX_DDP_BUF0_TAG)

#define W_TCB_RX_DDP_BUF1_TAG 31
#define S_TCB_RX_DDP_BUF1_TAG 0
#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)

#define S_TF_DACK 10
#define V_TF_DACK(x) ((x) << S_TF_DACK)

#define S_TF_NAGLE 11
#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)

#define S_TF_RECV_SCALE 12
#define V_TF_RECV_SCALE(x) ((x) << S_TF_RECV_SCALE)

#define S_TF_RECV_TSTMP 13
#define V_TF_RECV_TSTMP(x) ((x) << S_TF_RECV_TSTMP)

#define S_TF_RECV_SACK 14
#define V_TF_RECV_SACK(x) ((x) << S_TF_RECV_SACK)

#define S_TF_TURBO 15
#define V_TF_TURBO(x) ((x) << S_TF_TURBO)

#define S_TF_KEEPALIVE 16
#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)

#define S_TF_TCAM_BYPASS 17
#define V_TF_TCAM_BYPASS(x) ((x) << S_TF_TCAM_BYPASS)

#define S_TF_CORE_FIN 18
#define V_TF_CORE_FIN(x) ((x) << S_TF_CORE_FIN)

#define S_TF_CORE_MORE 19
#define V_TF_CORE_MORE(x) ((x) << S_TF_CORE_MORE)

#define S_TF_MIGRATING 20
#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)

#define S_TF_ACTIVE_OPEN 21
#define V_TF_ACTIVE_OPEN(x) ((x) << S_TF_ACTIVE_OPEN)

#define S_TF_ASK_MODE 22
#define V_TF_ASK_MODE(x) ((x) << S_TF_ASK_MODE)

#define S_TF_NON_OFFLOAD 23
#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)

#define S_TF_MOD_SCHD 24
#define V_TF_MOD_SCHD(x) ((x) << S_TF_MOD_SCHD)

#define S_TF_MOD_SCHD_REASON0 25
#define V_TF_MOD_SCHD_REASON0(x) ((x) << S_TF_MOD_SCHD_REASON0)

#define S_TF_MOD_SCHD_REASON1 26
#define V_TF_MOD_SCHD_REASON1(x) ((x) << S_TF_MOD_SCHD_REASON1)

#define S_TF_MOD_SCHD_RX 27
#define V_TF_MOD_SCHD_RX(x) ((x) << S_TF_MOD_SCHD_RX)

#define S_TF_CORE_PUSH 28
#define V_TF_CORE_PUSH(x) ((x) << S_TF_CORE_PUSH)

#define S_TF_RCV_COALESCE_ENABLE 29
#define V_TF_RCV_COALESCE_ENABLE(x) ((x) << S_TF_RCV_COALESCE_ENABLE)

#define S_TF_RCV_COALESCE_PUSH 30
#define V_TF_RCV_COALESCE_PUSH(x) ((x) << S_TF_RCV_COALESCE_PUSH)

#define S_TF_RCV_COALESCE_LAST_PSH 31
#define V_TF_RCV_COALESCE_LAST_PSH(x) ((x) << S_TF_RCV_COALESCE_LAST_PSH)

#define S_TF_RCV_COALESCE_HEARTBEAT 32
#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((x) << S_TF_RCV_COALESCE_HEARTBEAT)

#define S_TF_HALF_CLOSE 33
#define V_TF_HALF_CLOSE(x) ((x) << S_TF_HALF_CLOSE)

#define S_TF_DACK_MSS 34
#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)

#define S_TF_CCTRL_SEL0 35
#define V_TF_CCTRL_SEL0(x) ((x) << S_TF_CCTRL_SEL0)

#define S_TF_CCTRL_SEL1 36
#define V_TF_CCTRL_SEL1(x) ((x) << S_TF_CCTRL_SEL1)

#define S_TF_TCP_NEWRENO_FAST_RECOVERY 37
#define V_TF_TCP_NEWRENO_FAST_RECOVERY(x) ((x) << S_TF_TCP_NEWRENO_FAST_RECOVERY)

#define S_TF_TX_PACE_AUTO 38
#define V_TF_TX_PACE_AUTO(x) ((x) << S_TF_TX_PACE_AUTO)

#define S_TF_PEER_FIN_HELD 39
#define V_TF_PEER_FIN_HELD(x) ((x) << S_TF_PEER_FIN_HELD)

#define S_TF_CORE_URG 40
#define V_TF_CORE_URG(x) ((x) << S_TF_CORE_URG)

#define S_TF_RDMA_ERROR 41
#define V_TF_RDMA_ERROR(x) ((x) << S_TF_RDMA_ERROR)

#define S_TF_SSWS_DISABLED 42
#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)

#define S_TF_DUPACK_COUNT_ODD 43
#define V_TF_DUPACK_COUNT_ODD(x) ((x) << S_TF_DUPACK_COUNT_ODD)

#define S_TF_TX_CHANNEL 44
#define V_TF_TX_CHANNEL(x) ((x) << S_TF_TX_CHANNEL)

#define S_TF_RX_CHANNEL 45
#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)

#define S_TF_TX_PACE_FIXED 46
#define V_TF_TX_PACE_FIXED(x) ((x) << S_TF_TX_PACE_FIXED)

#define S_TF_RDMA_FLM_ERROR 47
#define V_TF_RDMA_FLM_ERROR(x) ((x) << S_TF_RDMA_FLM_ERROR)

#define S_TF_RX_FLOW_CONTROL_DISABLE 48
#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)

#endif /* _TCB_DEFS_H */
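The macros above follow the Chelsio TCB accessor convention: W_* gives the 32-bit word index of a field within the TCP Control Block, S_* its bit offset inside that word, M_* the field mask, and V_* shifts a caller-supplied value into position. A minimal sketch of how these pieces are typically combined when preparing a single field update; the helper name is hypothetical and not part of the removed driver, only the macro usage comes from the header above:

	/* Illustrative only: clip a PD ID value to its field width and
	 * shift it into position within TCB word W_TCB_PD_ID (word 28). */
	static inline unsigned long long tcb_pd_id_field(unsigned long long pdid)
	{
		return V_TCB_PD_ID(pdid & M_TCB_PD_ID);
	}

A caller would pair the returned value with the in-place mask (M_TCB_PD_ID << S_TCB_PD_ID) and the word index W_TCB_PD_ID when issuing the corresponding TCB write.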
@ -1,82 +0,0 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef CXGB3_ABI_USER_H
#define CXGB3_ABI_USER_H

#include <linux/types.h>

#define IWCH_UVERBS_ABI_VERSION 1

/*
 * Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __aligned_u64
 * instead.
 */
struct iwch_create_cq_req {
	__aligned_u64 user_rptr_addr;
};

struct iwch_create_cq_resp_v0 {
	__aligned_u64 key;
	__u32 cqid;
	__u32 size_log2;
};

struct iwch_create_cq_resp {
	__aligned_u64 key;
	__u32 cqid;
	__u32 size_log2;
	__u32 memsize;
	__u32 reserved;
};

struct iwch_create_qp_resp {
	__aligned_u64 key;
	__aligned_u64 db_key;
	__u32 qpid;
	__u32 size_log2;
	__u32 sq_size_log2;
	__u32 rq_size_log2;
};

struct iwch_reg_user_mr_resp {
	__u32 pbl_addr;
};

struct iwch_alloc_pd_resp {
	__u32 pdid;
};

#endif /* CXGB3_ABI_USER_H */
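The layout comment above is why every field in these request/response structs is a fixed-width __u32 or __aligned_u64 rather than a pointer type. A hedged sketch of the userspace side, assuming a C program that fills the struct shown above; the function name and variable names are illustrative only:

	#include <stdint.h>

	/* Illustrative only: hand the kernel a pointer through the ABI
	 * struct by widening it to a 64-bit integer, so the struct packs
	 * identically for 32-bit and 64-bit userspace. */
	static void fill_create_cq_req(struct iwch_create_cq_req *req,
				       uint32_t *user_rptr)
	{
		req->user_rptr_addr = (uintptr_t)user_rptr;
	}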
@ -88,7 +88,6 @@ enum rdma_driver_id {
	RDMA_DRIVER_UNKNOWN,
	RDMA_DRIVER_MLX5,
	RDMA_DRIVER_MLX4,
	RDMA_DRIVER_CXGB3,
	RDMA_DRIVER_CXGB4,
	RDMA_DRIVER_MTHCA,
	RDMA_DRIVER_BNXT_RE,