2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-17 09:43:59 +08:00

qed*: Utilize Firmware 8.15.3.0

This patch advances the qed* drivers into using the newer firmware -
This solves several firmware bugs, mostly related to [but not limited to]
various init/deinit issues in various offloaded protocols.

It also introduces a major 4-Cached SGE change in firmware, which can be
seen in the storage drivers' changes.

In addition, this firmware is required for supporting the new QL41xxx
series of adapters; While this patch doesn't add the actual support,
the firmware contains the necessary initialization & firmware logic to
operate such adapters [actual support would be added later on].

Changes from Previous versions:
-------------------------------
 - V2 - fix kbuild-test robot warnings

Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Manish Rangankar <Manish.Rangankar@cavium.com>
Signed-off-by: Chad Dupuis <Chad.Dupuis@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Mintz, Yuval 2017-03-11 18:39:18 +02:00 committed by David S. Miller
parent 6a019c5c50
commit be086e7c53
40 changed files with 4208 additions and 2202 deletions

View File

@ -587,9 +587,8 @@ void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
#define EVENT_TYPE_CQ 1
#define EVENT_TYPE_QP 2
struct qedr_dev *dev = (struct qedr_dev *)context;
union event_ring_data *data = fw_handle;
u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
data->roce_handle.lo;
struct regpair *async_handle = (struct regpair *)fw_handle;
u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
u8 event_type = EVENT_TYPE_NOT_DEFINED;
struct ib_event event;
struct ib_cq *ibcq;

View File

@ -38,7 +38,8 @@
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_roce_if.h>
#include <linux/qed/qede_roce.h>
#include "qedr_hsi.h"
#include <linux/qed/roce_common.h>
#include "qedr_hsi_rdma.h"
#define QEDR_MODULE_VERSION "8.10.10.0"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"

View File

@ -43,14 +43,11 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include "qedr_hsi.h"
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_roce_if.h>
#include "qedr.h"
#include "qedr_hsi.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_hsi.h"
#include "qedr_cm.h"
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)

View File

@ -1,56 +0,0 @@
/* QLogic qedr NIC Driver
* Copyright (c) 2015-2016 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __QED_HSI_ROCE__
#define __QED_HSI_ROCE__
#include <linux/qed/common_hsi.h>
#include <linux/qed/roce_common.h>
#include "qedr_hsi_rdma.h"
/* Affiliated asynchronous events / errors enumeration.
 * NOTE(review): values presumably mirror the firmware HSI definitions -
 * confirm against the FW interface before changing any ordering.
 */
enum roce_async_events_type {
ROCE_ASYNC_EVENT_NONE = 0,
ROCE_ASYNC_EVENT_COMM_EST = 1,
ROCE_ASYNC_EVENT_SQ_DRAINED,
ROCE_ASYNC_EVENT_SRQ_LIMIT,
ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
ROCE_ASYNC_EVENT_CQ_ERR,
ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
ROCE_ASYNC_EVENT_SRQ_EMPTY,
MAX_ROCE_ASYNC_EVENTS_TYPE
};
#endif /* __QED_HSI_ROCE__ */

View File

@ -43,7 +43,8 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include "qedr_hsi.h"
#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"

View File

@ -51,7 +51,7 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
#define DRV_MODULE_VERSION "8.10.10.20"
#define DRV_MODULE_VERSION "8.10.10.21"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16

View File

@ -71,8 +71,7 @@
#define TM_ALIGN BIT(TM_SHIFT)
#define TM_ELEM_SIZE 4
/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
#define ILT_DEFAULT_HW_P_SIZE 4
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@ -1126,7 +1125,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
/* default ILT page size for all clients is 32K */
/* default ILT page size for all clients is 64K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -215,13 +215,6 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
{
u32 qm_line_crd;
/* In A0 - Limit the size of pbf queue so that only 511 commands with
* the minimum size of 4 (FCoE minimum size)
*/
bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
if (is_bb_a0)
cmdq_lines = min_t(u32, cmdq_lines, 1022);
qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
(u32)cmdq_lines);
@ -343,13 +336,11 @@ static void qed_tx_pq_map_rt_init(
u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
QM_PF_QUEUE_GROUP_SIZE;
bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
u16 i, pq_id, pq_group;
/* a bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
u32 mem_addr_4kb = base_mem_addr_4kb;
@ -371,6 +362,10 @@ static void qed_tx_pq_map_rt_init(
bool is_vf_pq = (i >= p_params->num_pf_pqs);
struct qm_rf_pq_map tx_pq_map;
bool rl_valid = p_params->pq_params[i].rl_valid &&
(p_params->pq_params[i].vport_id <
MAX_QM_GLOBAL_RLS);
/* update first Tx PQ of VPORT/TC */
u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
p_params->start_vport;
@ -389,14 +384,18 @@ static void qed_tx_pq_map_rt_init(
(p_params->pf_id <<
QM_WFQ_VP_PQ_PF_SHIFT));
}
if (p_params->pq_params[i].rl_valid && !rl_valid)
DP_NOTICE(p_hwfn,
"Invalid VPORT ID for rate limiter configuration");
/* fill PQ map entry */
memset(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
p_params->pq_params[i].rl_valid ? 1 : 0);
SET_FIELD(tx_pq_map.reg,
QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
p_params->pq_params[i].rl_valid ?
rl_valid ?
p_params->pq_params[i].vport_id : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
@ -413,8 +412,9 @@ static void qed_tx_pq_map_rt_init(
/* if PQ is associated with a VF, add indication
* to PQ VF mask
*/
tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
(1 << (pq_id % tx_pq_vf_mask_width));
tx_pq_vf_mask[pq_id /
QM_PF_QUEUE_GROUP_SIZE] |=
BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
mem_addr_4kb += vport_pq_mem_4kb;
} else {
mem_addr_4kb += pq_mem_4kb;
@ -480,8 +480,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
if (p_params->pf_id < MAX_NUM_PFS_BB)
crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
else
crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
(p_params->pf_id % MAX_NUM_PFS_BB);
crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
@ -498,11 +498,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
QM_WFQ_CRD_REG_SIGN_BIT);
}
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
inc_val);
STORE_RT_REG(p_hwfn,
QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
inc_val);
return 0;
}
@ -576,6 +576,12 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
{
u8 i, vport_id;
if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn,
"Invalid VPORT ID for rate limiter configuration");
return -1;
}
/* go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
@ -785,6 +791,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
{
u32 inc_val = QM_RL_INC_VAL(vport_rl);
if (vport_id >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn,
"Invalid VPORT ID for rate limiter configuration");
return -1;
}
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
return -1;
@ -940,12 +952,6 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
/* comp ver */
reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
/* EDPM with geneve tunnel not supported in BB_B0 */
if (QED_IS_BB_B0(p_hwfn->cdev))
return;

View File

@ -554,7 +554,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
}
/* First Dword contains metadata and should be skipped */
buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
buf_hdr = (struct bin_buffer_hdr *)data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)(data + offset);

View File

@ -594,7 +594,7 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
u8 bd_flags = 0;
if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
return bd_flags;
}
@ -755,8 +755,8 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
p_buffer->placement_offset;
parse_flags = p_buffer->parse_flags;
bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
p_buffer->vlan, bd_flags,
@ -1588,33 +1588,34 @@ static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
p_tx->cur_send_frag_num++;
}
static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2,
struct qed_ll2_tx_packet *p_curp,
u8 num_of_bds,
enum core_tx_dest tx_dest,
u16 vlan,
u8 bd_flags,
u16 l4_hdr_offset_w,
enum core_roce_flavor_type type,
dma_addr_t first_frag,
u16 first_frag_len)
static void
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2,
struct qed_ll2_tx_packet *p_curp,
u8 num_of_bds,
enum core_tx_dest tx_dest,
u16 vlan,
u8 bd_flags,
u16 l4_hdr_offset_w,
enum core_roce_flavor_type roce_flavor,
dma_addr_t first_frag,
u16 first_frag_len)
{
struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
struct core_tx_bd *start_bd = NULL;
u16 frag_idx;
u16 bd_data = 0, frag_idx;
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
cpu_to_le16(l4_hdr_offset_w));
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
start_bd->bd_flags.as_bitfield = bd_flags;
start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
CORE_TX_BD_FLAGS_START_BD_SHIFT;
SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
bd_data |= bd_flags;
SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
DMA_REGPAIR_LE(start_bd->addr, first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len);
@ -1639,9 +1640,8 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
(*p_bd)->bd_flags.as_bitfield = 0;
(*p_bd)->bd_data.as_bitfield = 0;
(*p_bd)->bitfield1 = 0;
(*p_bd)->bitfield0 = 0;
p_curp->bds_set[frag_idx].tx_frag = 0;
p_curp->bds_set[frag_idx].frag_len = 0;
}
@ -2238,11 +2238,11 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
/* Request HW to calculate IP csum */
if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
if (skb_vlan_tag_present(skb)) {
vlan = skb_vlan_tag_get(skb);
flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
}
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),

View File

@ -356,6 +356,10 @@
0x238804UL
#define RDIF_REG_STOP_ON_ERROR \
0x300040UL
#define RDIF_REG_DEBUG_ERROR_INFO \
0x300400UL
#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \
64
#define SRC_REG_SOFT_RST \
0x23874cUL
#define TCFC_REG_ACTIVITY_COUNTER \
@ -370,6 +374,10 @@
0x1700004UL
#define TDIF_REG_STOP_ON_ERROR \
0x310040UL
#define TDIF_REG_DEBUG_ERROR_INFO \
0x310400UL
#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \
64
#define UCM_REG_INIT \
0x1280000UL
#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
@ -1236,6 +1244,26 @@
0x1901534UL
#define USEM_REG_DBG_FORCE_FRAME \
0x1901538UL
#define NWS_REG_DBG_SELECT \
0x700128UL
#define NWS_REG_DBG_DWORD_ENABLE \
0x70012cUL
#define NWS_REG_DBG_SHIFT \
0x700130UL
#define NWS_REG_DBG_FORCE_VALID \
0x700134UL
#define NWS_REG_DBG_FORCE_FRAME \
0x700138UL
#define MS_REG_DBG_SELECT \
0x6a0228UL
#define MS_REG_DBG_DWORD_ENABLE \
0x6a022cUL
#define MS_REG_DBG_SHIFT \
0x6a0230UL
#define MS_REG_DBG_FORCE_VALID \
0x6a0234UL
#define MS_REG_DBG_FORCE_FRAME \
0x6a0238UL
#define PCIE_REG_DBG_COMMON_SELECT \
0x054398UL
#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
@ -1448,6 +1476,8 @@
0x000b48UL
#define RSS_REG_RSS_RAM_DATA \
0x238c20UL
#define RSS_REG_RSS_RAM_DATA_SIZE \
4
#define MISC_REG_BLOCK_256B_EN \
0x008c14UL
#define NWS_REG_NWS_CMU \

View File

@ -66,13 +66,27 @@
#include "qed_roce.h"
#include "qed_ll2.h"
void qed_async_roce_event(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
p_eqe->opcode, &p_eqe->data);
/* Dispatch a RoCE asynchronous EQ event received from firmware.
 *
 * DESTROY_QP_DONE is consumed here to release the "real" icid; every other
 * event code is forwarded to the upper layer's affiliated_event callback
 * registered in p_rdma_info->events.
 */
void qed_roce_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code, union rdma_eqe_data *rdma_data)
{
if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
u16 icid =
(u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
/* icid release in this async event can occur only if the icid
 * was offloaded to the FW. In case it wasn't offloaded this is
 * handled in qed_roce_sp_destroy_qp.
 */
qed_roce_free_real_icid(p_hwfn, icid);
} else {
struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
/* Hand any other event, with its handle, to the registered
 * consumer (e.g. the qedr RDMA driver).
 */
events->affiliated_event(p_hwfn->p_rdma_info->events.context,
fw_event_code,
&rdma_data->async_handle);
}
}
static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
@ -113,6 +127,15 @@ static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
return 0;
}
/* Set bit @id_num in @bmap; out-of-range ids are silently ignored. */
static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, u32 id_num)
{
if (id_num >= bmap->max_count)
return;
__set_bit(id_num, bmap->bitmap);
}
static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, u32 id_num)
{
@ -129,6 +152,15 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
}
}
/* Test bit @id_num in @bmap.
 *
 * Returns -1 when @id_num is out of range, otherwise the bit's value
 * (non-zero when set, 0 when clear).
 */
static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, u32 id_num)
{
if (id_num >= bmap->max_count)
return -1;
return test_bit(id_num, bmap->bitmap);
}
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
/* First sb id for RoCE is after all the l2 sb */
@ -170,7 +202,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Queue zone lines are shared between RoCE and L2 in such a way that
* they can be used by each without obstructing the other.
*/
p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
/* Allocate a struct with device params and fill it */
p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
@ -248,9 +281,18 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
goto free_tid_map;
}
/* Allocate bitmap for cids used for responders/requesters. */
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons);
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate real cid bitmap, rc = %d\n", rc);
goto free_cid_map;
}
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
return 0;
free_cid_map:
kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
@ -273,7 +315,22 @@ free_rdma_info:
static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
int wait_count = 0;
/* when destroying a RoCE QP the control is returned to the user after
* the synchronous part. The asynchronous part may take a little longer.
* We delay for a short while if an async destroy QP is still expected.
* Beyond the added delay we clear the bitmap anyway.
*/
while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
msleep(100);
if (wait_count++ > 20) {
DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
break;
}
}
kfree(p_rdma_info->cid_map.bitmap);
kfree(p_rdma_info->tid_map.bitmap);
@ -724,6 +781,14 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
u32 addr;
p_hwfn = (struct qed_hwfn *)rdma_cxt;
if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
DP_NOTICE(p_hwfn,
"queue zone offset %d is too large (max is %d)\n",
qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
return;
}
qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
@ -1080,6 +1145,14 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
/* Release a responder/requester cid pair (@cid and @cid + 1) back to the
 * cid_map, under the rdma_info lock.
 */
void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@ -1139,6 +1212,13 @@ err:
return rc;
}
/* Mark @cid as offloaded to FW by setting it in real_cid_map, under the
 * rdma_info lock. Cleared again via qed_roce_free_real_icid().
 */
static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp)
{
@ -1147,7 +1227,8 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
union qed_qm_pq_params qm_params;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
u16 physical_queue0 = 0;
u16 regular_latency_queue;
enum protocol_type proto;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@ -1229,15 +1310,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
qp->rq_cq_id);
memset(&qm_params, 0, sizeof(qm_params));
qm_params.roce.qpid = qp->icid >> 1;
physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
&qm_params);
p_ramrod->regular_latency_phy_queue =
cpu_to_le16(regular_latency_queue);
p_ramrod->low_latency_phy_queue =
cpu_to_le16(regular_latency_queue);
p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@ -1253,13 +1338,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
rc, physical_queue0);
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"rc = %d regular physical queue = 0x%x\n", rc,
regular_latency_queue);
if (rc)
goto err;
qp->resp_offloaded = true;
qp->cq_prod = 0;
proto = p_hwfn->p_rdma_info->proto;
qed_roce_set_real_cid(p_hwfn, qp->icid -
qed_cxt_get_proto_cid_start(p_hwfn, proto));
return rc;
@ -1280,7 +1371,8 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
union qed_qm_pq_params qm_params;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
u16 physical_queue0 = 0;
u16 regular_latency_queue;
enum protocol_type proto;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@ -1351,15 +1443,19 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
qp->sq_cq_id);
p_ramrod->cq_cid =
cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
memset(&qm_params, 0, sizeof(qm_params));
qm_params.roce.qpid = qp->icid >> 1;
physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
&qm_params);
p_ramrod->regular_latency_phy_queue =
cpu_to_le16(regular_latency_queue);
p_ramrod->low_latency_phy_queue =
cpu_to_le16(regular_latency_queue);
p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@ -1378,6 +1474,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
goto err;
qp->req_offloaded = true;
proto = p_hwfn->p_rdma_info->proto;
qed_roce_set_real_cid(p_hwfn,
qp->icid + 1 -
qed_cxt_get_proto_cid_start(p_hwfn, proto));
return rc;
@ -1577,7 +1677,8 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
u32 *num_invalidated_mw)
u32 *num_invalidated_mw,
u32 *cq_prod)
{
struct roce_destroy_qp_resp_output_params *p_ramrod_res;
struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
@ -1588,8 +1689,22 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
if (!qp->resp_offloaded)
*num_invalidated_mw = 0;
*cq_prod = qp->cq_prod;
if (!qp->resp_offloaded) {
/* If a responder was never offloaded, we need to free the cids
* allocated in create_qp as a FW async event will never arrive
*/
u32 cid;
cid = qp->icid -
qed_cxt_get_proto_cid_start(p_hwfn,
p_hwfn->p_rdma_info->proto);
qed_roce_free_cid_pair(p_hwfn, (u16)cid);
return 0;
}
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@ -1624,6 +1739,8 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
goto err;
*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
qp->cq_prod = *cq_prod;
/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@ -1827,10 +1944,8 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
out_params->draining = false;
if (rq_err_state)
if (rq_err_state || sq_err_state)
qp->cur_state = QED_ROCE_QP_STATE_ERR;
else if (sq_err_state)
qp->cur_state = QED_ROCE_QP_STATE_SQE;
else if (sq_draining)
out_params->draining = true;
out_params->state = qp->cur_state;
@ -1849,10 +1964,9 @@ err_resp:
static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
u32 start_cid;
u32 cq_prod;
int rc;
/* Destroys the specified QP */
@ -1866,7 +1980,8 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
&num_invalidated_mw);
&num_invalidated_mw,
&cq_prod);
if (rc)
return rc;
@ -1881,21 +1996,6 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
"number of invalidate memory windows is different from bounded ones\n");
return -EINVAL;
}
spin_lock_bh(&p_rdma_info->lock);
start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
p_rdma_info->proto);
/* Release responder's icid */
qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
qp->icid - start_cid);
/* Release requester's icid */
qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
qp->icid + 1 - start_cid);
spin_unlock_bh(&p_rdma_info->lock);
}
return 0;
@ -2110,12 +2210,19 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
return rc;
} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
/* Any state -> RESET */
u32 cq_prod;
/* Send destroy responder ramrod */
rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
qp,
&num_invalidated_mw,
&cq_prod);
rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
&num_invalidated_mw);
if (rc)
return rc;
qp->cq_prod = cq_prod;
rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
&num_bound_mw);
@ -2454,6 +2561,31 @@ static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
return rc;
}
/* Release the real icid freed by a DESTROY_QP_DONE async event.
 *
 * The underlying cid pair is returned to cid_map only once both halves
 * of the pair (responder and requester) have been released from
 * real_cid_map.
 */
static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 start_cid, cid, xcid;
/* an even icid belongs to a responder while an odd icid belongs to a
 * requester. The 'cid' received as an input can be either. We calculate
 * the "partner" icid and call it xcid. Only if both are free then the
 * "cid" map can be cleared.
 */
start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
cid = icid - start_cid;
xcid = cid ^ 1;
spin_lock_bh(&p_rdma_info->lock);
qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
}
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
return QED_LEADING_HWFN(cdev);
@ -2773,7 +2905,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
: QED_LL2_RROCE;
if (pkt->roce_mode == ROCE_V2_IPV4)
flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
/* Tx header */
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,

View File

@ -82,6 +82,7 @@ struct qed_rdma_info {
struct qed_bmap qp_map;
struct qed_bmap srq_map;
struct qed_bmap cid_map;
struct qed_bmap real_cid_map;
struct qed_bmap dpi_map;
struct qed_bmap toggle_bits;
struct qed_rdma_events events;
@ -92,6 +93,7 @@ struct qed_rdma_info {
u32 num_qps;
u32 num_mrs;
u16 queue_zone_base;
u16 max_queue_zones;
enum protocol_type proto;
};
@ -153,6 +155,7 @@ struct qed_rdma_qp {
dma_addr_t irq_phys_addr;
u8 irq_num_pages;
bool resp_offloaded;
u32 cq_prod;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
@ -163,8 +166,8 @@ struct qed_rdma_qp {
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_async_roce_event(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe);
void qed_roce_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code, union rdma_eqe_data *rdma_data);
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
@ -187,7 +190,9 @@ void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u16 src_mac_addr_lo, bool b_last_packet);
#else
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code,
union rdma_eqe_data *rdma_data) {}
static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,

View File

@ -296,9 +296,12 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
#if IS_ENABLED(CONFIG_QED_RDMA)
case PROTOCOLID_ROCE:
qed_async_roce_event(p_hwfn, p_eqe);
qed_roce_async_event(p_hwfn, p_eqe->opcode,
&p_eqe->data.rdma_data);
return 0;
#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
@ -306,14 +309,6 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
case PROTOCOLID_ISCSI:
if (!IS_ENABLED(CONFIG_QED_ISCSI))
return -EINVAL;
if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
qed_ooo_release_connection_isles(p_hwfn,
p_hwfn->p_ooo_info,
cid);
return 0;
}
if (p_hwfn->p_iscsi_info->event_cb) {
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;

View File

@ -50,7 +50,7 @@
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 10
#define QEDE_REVISION_VERSION 10
#define QEDE_ENGINEERING_VERSION 20
#define QEDE_ENGINEERING_VERSION 21
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \

View File

@ -1,5 +1,5 @@
obj-$(CONFIG_QEDF) := qedf.o
qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \
qedf_attr.o qedf_els.o
qedf_attr.o qedf_els.o drv_scsi_fw_funcs.o drv_fcoe_fw_funcs.o
qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o

View File

@ -0,0 +1,190 @@
/* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include "drv_fcoe_fw_funcs.h"
#include "drv_scsi_fw_funcs.h"
#define FCOE_RX_ID ((u32)0x0000FFFF)
static inline void init_common_sqe(struct fcoe_task_params *task_params,
enum fcoe_sqe_request_type request_type)
{
memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
request_type);
task_params->sqe->task_id = task_params->itid;
}
/**
 * @brief init_initiator_rw_fcoe_task - Build the firmware task context and
 * SQE for an initiator read or write command.
 *
 * Zeroes the supplied fcoe_task_context, programs the Ystorm/Tstorm/Ustorm/
 * Mstorm sections according to the direction in task_params->task_type, and
 * finally stamps a SEND_FCOE_CMD SQE.
 *
 * @param task_params - per-task parameters (context/sqe/type/sizes/ids)
 * @param sgl_task_params - SGL describing the data buffer
 * @param sense_data_buffer_phys_addr - physical address of the sense buffer
 * @param task_retry_id - retry identification - used only for tape devices
 * @param fcp_cmd_payload - 32-byte FCP_CMND payload copied into Ystorm ctx
 *
 * Always returns 0.
 */
int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
				struct scsi_sgl_task_params *sgl_task_params,
				struct regpair sense_data_buffer_phys_addr,
				u32 task_retry_id,
				u8 fcp_cmd_payload[32])
{
	struct fcoe_task_context *ctx = task_params->context;
	struct ystorm_fcoe_task_st_ctx *y_st_ctx;
	struct tstorm_fcoe_task_st_ctx *t_st_ctx;
	struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
	struct mstorm_fcoe_task_st_ctx *m_st_ctx;
	u32 io_size, val;
	bool slow_sgl;

	memset(ctx, 0, sizeof(*(ctx)));
	slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
				    sgl_task_params->small_mid_sge);
	/* The transfer size that matters is the one in the command's data
	 * direction: tx for writes, rx for reads.
	 */
	io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
		   task_params->tx_io_size : task_params->rx_io_size);

	/* Ystorm ctx */
	y_st_ctx = &ctx->ystorm_st_context;
	y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
	/* "task_rety_identifier" is the HSI field's (misspelled) name */
	y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
	y_st_ctx->task_type = task_params->task_type;
	memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
	       fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));

	/* Tstorm ctx */
	t_st_ctx = &ctx->tstorm_st_context;
	t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
					FCOE_TASK_DEV_TYPE_TAPE :
					FCOE_TASK_DEV_TYPE_DISK);
	t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
	val = cpu_to_le32(task_params->cq_rss_number);
	t_st_ctx->read_only.glbl_q_num = val;
	t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
	t_st_ctx->read_only.task_type = task_params->task_type;
	SET_FIELD(t_st_ctx->read_write.flags,
		  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
	/* 0xFFFF - presumably "RX_ID not yet assigned"; confirm against HSI */
	t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);

	/* Ustorm ctx */
	u_ag_ctx = &ctx->ustorm_ag_context;
	u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);

	/* Mstorm buffer for sense/rsp data placement */
	m_st_ctx = &ctx->mstorm_st_context;
	val = cpu_to_le32(sense_data_buffer_phys_addr.hi);
	m_st_ctx->rsp_buf_addr.hi = val;
	val = cpu_to_le32(sense_data_buffer_phys_addr.lo);
	m_st_ctx->rsp_buf_addr.lo = val;

	if (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
		/* Write: data is transmitted, so the SGL goes into Ystorm */
		y_st_ctx->expect_first_xfer = 1;

		/* Set the amount of super SGEs. Can be up to 4. */
		SET_FIELD(y_st_ctx->sgl_mode,
			  YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
			  (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
		init_scsi_sgl_context(&y_st_ctx->sgl_params,
				      &y_st_ctx->data_desc,
				      sgl_task_params);

		/* Mstorm ctx */
		SET_FIELD(m_st_ctx->flags,
			  MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
			  (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
	} else {
		/* Read: data is received, so the SGL goes into Mstorm */
		SET_FIELD(t_st_ctx->read_write.flags,
			  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
			  (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));

		/* Mstorm ctx */
		m_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
		init_scsi_sgl_context(&m_st_ctx->sgl_params,
				      &m_st_ctx->data_desc,
				      sgl_task_params);
	}

	init_common_sqe(task_params, SEND_FCOE_CMD);
	return 0;
}
/**
 * @brief init_initiator_midpath_unsolicited_fcoe_task - Build the firmware
 * task context and SQE for midpath/unsolicited exchanges.
 *
 * @param task_params - per-task parameters (context/sqe/type/sizes/ids)
 * @param mid_path_fc_header - FC header for the outgoing frame
 * @param tx_sgl_task_params - SGL for the request (tx) buffer
 * @param rx_sgl_task_params - SGL for the response (rx) buffer
 * @param fw_to_place_fc_header - if set, the FW also places the FC header
 *	in the rx buffer together with the arriving data
 *
 * Always returns 0.
 */
int init_initiator_midpath_unsolicited_fcoe_task(
	struct fcoe_task_params *task_params,
	struct fcoe_tx_mid_path_params *mid_path_fc_header,
	struct scsi_sgl_task_params *tx_sgl_task_params,
	struct scsi_sgl_task_params *rx_sgl_task_params,
	u8 fw_to_place_fc_header)
{
	struct fcoe_task_context *ctx = task_params->context;
	struct ystorm_fcoe_task_st_ctx *y_st_ctx;
	struct tstorm_fcoe_task_st_ctx *t_st_ctx;
	struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
	struct mstorm_fcoe_task_st_ctx *m_st_ctx;
	u32 val;

	memset(ctx, 0, sizeof(*(ctx)));

	/* Init Ystorm - tx side: SGL for the request plus the FC header */
	y_st_ctx = &ctx->ystorm_st_context;
	init_scsi_sgl_context(&y_st_ctx->sgl_params,
			      &y_st_ctx->data_desc,
			      tx_sgl_task_params);
	SET_FIELD(y_st_ctx->sgl_mode,
		  YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
	y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
	y_st_ctx->task_type = task_params->task_type;
	memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
	       mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));

	/* Init Mstorm - rx side: SGL for the response buffer */
	m_st_ctx = &ctx->mstorm_st_context;
	init_scsi_sgl_context(&m_st_ctx->sgl_params,
			      &m_st_ctx->data_desc,
			      rx_sgl_task_params);
	SET_FIELD(m_st_ctx->flags,
		  MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER,
		  fw_to_place_fc_header);
	m_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->rx_io_size);

	/* Init Tstorm */
	t_st_ctx = &ctx->tstorm_st_context;
	t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
	val = cpu_to_le32(task_params->cq_rss_number);
	t_st_ctx->read_only.glbl_q_num = val;
	t_st_ctx->read_only.task_type = task_params->task_type;
	SET_FIELD(t_st_ctx->read_write.flags,
		  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
	/* 0xFFFF - presumably "RX_ID not yet assigned"; confirm against HSI */
	t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);

	/* Init Ustorm */
	u_ag_ctx = &ctx->ustorm_ag_context;
	u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);

	/* Init SQE: midpath request plus tx burst/SGE information.
	 * NOTE(review): burst_length is assigned without cpu_to_le32 -
	 * confirm the field's endianness against the HSI definition.
	 */
	init_common_sqe(task_params, SEND_FCOE_MIDPATH);
	task_params->sqe->additional_info_union.burst_length =
	    tx_sgl_task_params->total_buffer_size;
	SET_FIELD(task_params->sqe->flags,
		  FCOE_WQE_NUM_SGES, tx_sgl_task_params->num_sges);
	SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,
		  SCSI_FAST_SGL);

	return 0;
}
/**
 * @brief init_initiator_abort_fcoe_task - Stamp an ABTS request SQE for the
 * task identified by task_params->itid.
 *
 * @param task_params - Pointer to task parameters struct
 *
 * Always returns 0.
 */
int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params)
{
	init_common_sqe(task_params, SEND_FCOE_ABTS_REQUEST);
	return 0;
}
/**
 * @brief init_initiator_cleanup_fcoe_task - Stamp an exchange-cleanup SQE for
 * the task identified by task_params->itid.
 *
 * @param task_params - Pointer to task parameters struct
 *
 * Always returns 0.
 */
int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
{
	init_common_sqe(task_params, FCOE_EXCHANGE_CLEANUP);
	return 0;
}
/**
 * @brief init_initiator_sequence_recovery_fcoe_task - Stamp a sequence
 * recovery SQE that re-sends the exchange from the given offset.
 *
 * @param task_params - Pointer to task parameters struct
 * @param off - offset the task will be re-sent from
 *
 * Always returns 0.
 */
int init_initiator_sequence_recovery_fcoe_task(
	struct fcoe_task_params *task_params, u32 off)
{
	init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
	/* NOTE(review): assigned without cpu_to_le32 - confirm the field's
	 * endianness against the HSI definition.
	 */
	task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
	return 0;
}

View File

@ -0,0 +1,93 @@
/* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _FCOE_FW_FUNCS_H
#define _FCOE_FW_FUNCS_H
#include "drv_scsi_fw_funcs.h"
#include "qedf_hsi.h"
#include <linux/qed/qed_if.h>
/* Input bundle consumed by the init_initiator_*_fcoe_task() helpers declared
 * below; the caller fills it once per task and the helpers program the
 * firmware task context and SQE from it.
 */
struct fcoe_task_params {
	/* Output parameter [set/filled by the HSI function] */
	struct fcoe_task_context *context;

	/* Output parameter [set/filled by the HSI function] */
	struct fcoe_wqe *sqe;
	enum fcoe_task_type task_type;
	u32 tx_io_size; /* in bytes */
	u32 rx_io_size; /* in bytes */
	u32 conn_cid;	/* connection CID the task belongs to */
	u16 itid;	/* task id; stamped into the SQE */
	u8 cq_rss_number;	/* completion queue index used for this task */

	/* Whether it's Tape device or not (0=Disk, 1=Tape) */
	u8 is_tape_device;
};
/**
* @brief init_initiator_rw_fcoe_task - Initializes FCoE task context for
* read/write task types and init fcoe_sqe
*
* @param task_params - Pointer to task parameters struct
* @param sgl_task_params - Pointer to SGL task params
* @param sense_data_buffer_phys_addr - Pointer to sense data buffer
* @param task_retry_id - retry identification - Used only for Tape device
* @param fcp_cmnd_payload - FCP CMD Payload
*/
int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
struct scsi_sgl_task_params *sgl_task_params,
struct regpair sense_data_buffer_phys_addr,
u32 task_retry_id,
u8 fcp_cmd_payload[32]);
/**
* @brief init_initiator_midpath_unsolicited_fcoe_task - Initializes FCoE task context for
* midpath/unsolicited task types and init fcoe_sqe
*
* @param task_params - Pointer to task parameters struct
* @param mid_path_fc_header - FC header
* @param tx_sgl_task_params - Pointer to Tx SGL task params
* @param rx_sgl_task_params - Pointer to Rx SGL task params
* @param fw_to_place_fc_header - Indication if the FW will place the FC header
* in addition to the data that arrives.
*/
int init_initiator_midpath_unsolicited_fcoe_task(
struct fcoe_task_params *task_params,
struct fcoe_tx_mid_path_params *mid_path_fc_header,
struct scsi_sgl_task_params *tx_sgl_task_params,
struct scsi_sgl_task_params *rx_sgl_task_params,
u8 fw_to_place_fc_header);
/**
* @brief init_initiator_abort_fcoe_task - Initializes FCoE task context for
* abort task types and init fcoe_sqe
*
* @param task_params - Pointer to task parameters struct
*/
int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params);
/**
* @brief init_initiator_cleanup_fcoe_task - Initializes FCoE task context for
* cleanup task types and init fcoe_sqe
*
*
* @param task_params - Pointer to task parameters struct
*/
int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params);
/**
* @brief init_initiator_sequence_recovery_fcoe_task - Initializes FCoE task
* context for sequence recovery task types and init fcoe_sqe
*
*
* @param task_params - Pointer to task parameters struct
* @param desired_offset - The desired offset the task will be re-sent from
*/
int init_initiator_sequence_recovery_fcoe_task(
struct fcoe_task_params *task_params,
u32 desired_offset);
#endif

View File

@ -0,0 +1,44 @@
/* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include "drv_scsi_fw_funcs.h"
#define SCSI_NUM_SGES_IN_CACHE 0x4
/* An SGL is "slow" only when it is both long (more than
 * SCSI_NUM_SGES_SLOW_SGL_THR entries) and contains a small middle SGE.
 */
bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
{
	if (!small_mid_sge)
		return false;

	return num_sges > SCSI_NUM_SGES_SLOW_SGL_THR;
}
void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
struct scsi_cached_sges *ctx_data_desc,
struct scsi_sgl_task_params *sgl_task_params)
{
/* no need to check for sgl_task_params->sgl validity */
u8 num_sges_to_init = sgl_task_params->num_sges >
SCSI_NUM_SGES_IN_CACHE ? SCSI_NUM_SGES_IN_CACHE :
sgl_task_params->num_sges;
u8 sge_index;
u32 val;
val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
ctx_sgl_params->sgl_addr.lo = val;
val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
ctx_sgl_params->sgl_addr.hi = val;
val = cpu_to_le32(sgl_task_params->total_buffer_size);
ctx_sgl_params->sgl_total_length = val;
ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
ctx_data_desc->sge[sge_index].sge_addr.lo = val;
val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
ctx_data_desc->sge[sge_index].sge_addr.hi = val;
val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
ctx_data_desc->sge[sge_index].sge_len = val;
}
}

View File

@ -0,0 +1,85 @@
/* QLogic FCoE Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _SCSI_FW_FUNCS_H
#define _SCSI_FW_FUNCS_H
#include <linux/qed/common_hsi.h>
#include <linux/qed/storage_common.h>
#include <linux/qed/fcoe_common.h>
/* Driver-side description of a scatter/gather list, consumed by
 * init_scsi_sgl_context() and the FCoE task-init helpers.
 */
struct scsi_sgl_task_params {
	struct scsi_sge *sgl;		/* virtual address of the SGE array */
	struct regpair sgl_phys_addr;	/* physical address of the SGE array */
	u32 total_buffer_size;		/* in bytes */
	u16 num_sges;

	/* true if SGL contains a small (< 4KB) SGE in middle (not 1st or
	 * last) -> relevant for tx only
	 */
	bool small_mid_sge;
};
/* Per-task DIF (data integrity field) protection parameters.
 * NOTE(review): not consumed by the init functions visible in this file;
 * presumably used by other HSI helpers - confirm against callers.
 */
struct scsi_dif_task_params {
	u32 initial_ref_tag;
	bool initial_ref_tag_is_valid;
	u16 application_tag;
	u16 application_tag_mask;
	u16 dif_block_size_log;
	bool dif_on_network;
	bool dif_on_host;
	u8 host_guard_type;
	u8 protection_type;
	u8 ref_tag_mask;
	bool crc_seed;

	/* Enable Connection error upon DIF error (segments with DIF errors are
	 * dropped)
	 */
	bool tx_dif_conn_err_en;
	bool ignore_app_tag;
	bool keep_ref_tag_const;
	bool validate_guard;
	bool validate_app_tag;
	bool validate_ref_tag;
	bool forward_guard;
	bool forward_app_tag;
	bool forward_ref_tag;
	bool forward_app_tag_with_mask;
	bool forward_ref_tag_with_mask;
};
/* Initiator command buffers: extended-CDB SGE and sense-data address. */
struct scsi_initiator_cmd_params {
	/* for cdb_size > default CDB size (extended CDB > 16 bytes) ->
	 * pointer to the CDB buffer SGE
	 */
	struct scsi_sge extended_cdb_sge;

	/* Physical address of sense data buffer for sense data - 256B buffer */
	struct regpair sense_data_buffer_phys_addr;
};
/**
* @brief scsi_is_slow_sgl - checks for slow SGL
*
* @param num_sges - number of sges in SGL
* @param small_mid_sge - True if the SGL contains an SGE which is smaller than
* 4KB and it's not the 1st or last SGE in the SGL
*/
bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge);
/**
* @brief init_scsi_sgl_context - initializes SGL task context
*
* @param sgl_params - SGL context parameters to initialize (output parameter)
* @param data_desc - context struct containing SGEs array to set (output
* parameter)
* @param sgl_task_params - SGL parameters (input)
*/
void init_scsi_sgl_context(struct scsi_sgl_params *sgl_params,
struct scsi_cached_sges *ctx_data_desc,
struct scsi_sgl_task_params *sgl_task_params);
#endif

View File

@ -26,6 +26,7 @@
#include <linux/qed/qed_ll2_if.h>
#include "qedf_version.h"
#include "qedf_dbg.h"
#include "drv_fcoe_fw_funcs.h"
/* Helpers to extract upper and lower 32-bits of pointer */
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
@ -59,19 +60,17 @@
#define UPSTREAM_KEEP 1
struct qedf_mp_req {
uint8_t tm_flags;
uint32_t req_len;
void *req_buf;
dma_addr_t req_buf_dma;
struct fcoe_sge *mp_req_bd;
struct scsi_sge *mp_req_bd;
dma_addr_t mp_req_bd_dma;
struct fc_frame_header req_fc_hdr;
uint32_t resp_len;
void *resp_buf;
dma_addr_t resp_buf_dma;
struct fcoe_sge *mp_resp_bd;
struct scsi_sge *mp_resp_bd;
dma_addr_t mp_resp_bd_dma;
struct fc_frame_header resp_fc_hdr;
};
@ -119,6 +118,7 @@ struct qedf_ioreq {
#define QEDF_CMD_IN_CLEANUP 0x2
#define QEDF_CMD_SRR_SENT 0x3
u8 io_req_flags;
uint8_t tm_flags;
struct qedf_rport *fcport;
unsigned long flags;
enum qedf_ioreq_event event;
@ -130,6 +130,8 @@ struct qedf_ioreq {
struct completion tm_done;
struct completion abts_done;
struct fcoe_task_context *task;
struct fcoe_task_params *task_params;
struct scsi_sgl_task_params *sgl_task_params;
int idx;
/*
* Need to allocate enough room for both sense data and FCP response data
@ -199,8 +201,8 @@ struct qedf_rport {
dma_addr_t sq_pbl_dma;
u32 sq_pbl_size;
u32 sid;
#define QEDF_RPORT_TYPE_DISK 1
#define QEDF_RPORT_TYPE_TAPE 2
#define QEDF_RPORT_TYPE_DISK 0
#define QEDF_RPORT_TYPE_TAPE 1
uint dev_type; /* Disk or tape */
struct list_head peers;
};
@ -391,7 +393,7 @@ struct qedf_ctx {
struct io_bdt {
struct qedf_ioreq *io_req;
struct fcoe_sge *bd_tbl;
struct scsi_sge *bd_tbl;
dma_addr_t bd_tbl_dma;
u16 bd_valid;
};
@ -400,7 +402,7 @@ struct qedf_cmd_mgr {
struct qedf_ctx *qedf;
u16 idx;
struct io_bdt **io_bdt_pool;
#define FCOE_PARAMS_NUM_TASKS 4096
#define FCOE_PARAMS_NUM_TASKS 2048
struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
spinlock_t lock;
atomic_t free_list_cnt;
@ -465,9 +467,8 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
struct fcoe_task_context *task_ctx);
extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid,
u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset);
struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *els_req);

View File

@ -25,6 +25,9 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
uint16_t xid;
uint32_t start_time = jiffies / HZ;
uint32_t current_time;
struct fcoe_wqe *sqe;
unsigned long flags;
u16 sqe_idx;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
@ -113,20 +116,25 @@ retry_els:
/* Obtain exchange id */
xid = els_req->xid;
spin_lock_irqsave(&fcport->rport_lock, flags);
sqe_idx = qedf_get_sqe_idx(fcport);
sqe = &fcport->sq[sqe_idx];
memset(sqe, 0, sizeof(struct fcoe_wqe));
/* Initialize task context for this IO request */
task = qedf_get_task_mem(&qedf->tasks, xid);
qedf_init_mp_task(els_req, task);
qedf_init_mp_task(els_req, task, sqe);
/* Put timer on original I/O request */
if (timer_msec)
qedf_cmd_timer_set(qedf, els_req, timer_msec);
qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
/* Ring doorbell */
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
"req\n");
qedf_ring_doorbell(fcport);
spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
return rc;
}
@ -604,6 +612,8 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
struct qedf_rport *fcport;
unsigned long flags;
struct qedf_els_cb_arg *cb_arg;
struct fcoe_wqe *sqe;
u16 sqe_idx;
fcport = orig_io_req->fcport;
@ -631,8 +641,13 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
spin_lock_irqsave(&fcport->rport_lock, flags);
qedf_add_to_sq(fcport, orig_io_req->xid, 0,
FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset);
sqe_idx = qedf_get_sqe_idx(fcport);
sqe = &fcport->sq[sqe_idx];
memset(sqe, 0, sizeof(struct fcoe_wqe));
orig_io_req->task_params->sqe = sqe;
init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
offset);
qedf_ring_doorbell(fcport);
spin_unlock_irqrestore(&fcport->rport_lock, flags);

View File

@ -96,7 +96,7 @@ void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
if (!cmgr->io_bdt_pool)
goto free_cmd_pool;
bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge);
bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
for (i = 0; i < num_ios; i++) {
bdt_info = cmgr->io_bdt_pool[i];
if (bdt_info->bd_tbl) {
@ -119,6 +119,8 @@ free_cmd_pool:
for (i = 0; i < num_ios; i++) {
io_req = &cmgr->cmds[i];
kfree(io_req->sgl_task_params);
kfree(io_req->task_params);
/* Make sure we free per command sense buffer */
if (io_req->sense_buffer)
dma_free_coherent(&qedf->pdev->dev,
@ -178,7 +180,7 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
spin_lock_init(&cmgr->lock);
/*
* Initialize list of qedf_ioreq.
* Initialize I/O request fields.
*/
xid = QEDF_MIN_XID;
@ -196,6 +198,29 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
GFP_KERNEL);
if (!io_req->sense_buffer)
goto mem_err;
/* Allocate task parameters to pass to f/w init functions */
io_req->task_params = kzalloc(sizeof(*io_req->task_params),
GFP_KERNEL);
if (!io_req->task_params) {
QEDF_ERR(&(qedf->dbg_ctx),
"Failed to allocate task_params for xid=0x%x\n",
i);
goto mem_err;
}
/*
* Allocate scatter/gather list info to pass to f/w init
* functions.
*/
io_req->sgl_task_params = kzalloc(
sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
if (!io_req->sgl_task_params) {
QEDF_ERR(&(qedf->dbg_ctx),
"Failed to allocate sgl_task_params for xid=0x%x\n",
i);
goto mem_err;
}
}
/* Allocate pool of io_bdts - one for each qedf_ioreq */
@ -211,8 +236,8 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
GFP_KERNEL);
if (!cmgr->io_bdt_pool[i]) {
QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
"io_bdt_pool[%d].\n", i);
QEDF_WARN(&(qedf->dbg_ctx),
"Failed to alloc io_bdt_pool[%d].\n", i);
goto mem_err;
}
}
@ -220,11 +245,11 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
for (i = 0; i < num_ios; i++) {
bdt_info = cmgr->io_bdt_pool[i];
bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge),
QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
&bdt_info->bd_tbl_dma, GFP_KERNEL);
if (!bdt_info->bd_tbl) {
QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
"bdt_tbl[%d].\n", i);
QEDF_WARN(&(qedf->dbg_ctx),
"Failed to alloc bdt_tbl[%d].\n", i);
goto mem_err;
}
}
@ -318,6 +343,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
}
bd_tbl->io_req = io_req;
io_req->cmd_type = cmd_type;
io_req->tm_flags = 0;
/* Reset sequence offset data */
io_req->rx_buf_off = 0;
@ -336,10 +362,9 @@ static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_ctx *qedf = io_req->fcport->qedf;
uint64_t sz = sizeof(struct fcoe_sge);
uint64_t sz = sizeof(struct scsi_sge);
/* clear tm flags */
mp_req->tm_flags = 0;
if (mp_req->mp_req_bd) {
dma_free_coherent(&qedf->pdev->dev, sz,
mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
@ -387,7 +412,7 @@ void qedf_release_cmd(struct kref *ref)
static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
int bd_index)
{
struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
int frag_size, sg_frags;
sg_frags = 0;
@ -398,7 +423,7 @@ static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
frag_size = sg_len;
bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
bd[bd_index + sg_frags].size = (uint16_t)frag_size;
bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
addr += (u64)frag_size;
sg_frags++;
@ -413,7 +438,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
struct Scsi_Host *host = sc->device->host;
struct fc_lport *lport = shost_priv(host);
struct qedf_ctx *qedf = lport_priv(lport);
struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
struct scatterlist *sg;
int byte_count = 0;
int sg_count = 0;
@ -439,7 +464,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
bd[bd_count].sge_addr.hi = (addr >> 32);
bd[bd_count].size = (u16)sg_len;
bd[bd_count].sge_len = (u16)sg_len;
return ++bd_count;
}
@ -480,7 +505,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
sg_frags = 1;
bd[bd_count].sge_addr.lo = U64_LO(addr);
bd[bd_count].sge_addr.hi = U64_HI(addr);
bd[bd_count].size = (uint16_t)sg_len;
bd[bd_count].sge_len = (uint16_t)sg_len;
}
bd_count += sg_frags;
@ -498,7 +523,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
struct scsi_cmnd *sc = io_req->sc_cmd;
struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
int bd_count;
if (scsi_sg_count(sc)) {
@ -508,7 +533,7 @@ static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
} else {
bd_count = 0;
bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
bd[0].size = 0;
bd[0].sge_len = 0;
}
io_req->bd_tbl->bd_valid = bd_count;
@ -529,430 +554,223 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
/* 4 bytes: flag info */
fcp_cmnd->fc_pri_ta = 0;
fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
fcp_cmnd->fc_tm_flags = io_req->tm_flags;
fcp_cmnd->fc_flags = io_req->io_req_flags;
fcp_cmnd->fc_cmdref = 0;
/* Populate data direction */
if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
} else {
if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
}
fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
/* 16 bytes: CDB information */
memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
/* 4 bytes: FCP data length */
fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
struct qedf_ioreq *io_req, u32 *ptu_invalidate,
struct fcoe_task_context *task_ctx)
struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
struct fcoe_wqe *sqe)
{
enum fcoe_task_type task_type;
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct io_bdt *bd_tbl = io_req->bd_tbl;
union fcoe_data_desc_ctx *data_desc;
u32 *fcp_cmnd;
u8 fcp_cmnd[32];
u32 tmp_fcp_cmnd[8];
int cnt, i;
int bd_count;
int bd_count = 0;
struct qedf_ctx *qedf = fcport->qedf;
uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
u8 tmp_sgl_mode = 0;
u8 mst_sgl_mode = 0;
struct regpair sense_data_buffer_phys_addr;
u32 tx_io_size = 0;
u32 rx_io_size = 0;
int i, cnt;
memset(task_ctx, 0, sizeof(struct fcoe_task_context));
/* Note init_initiator_rw_fcoe_task memsets the task context */
io_req->task = task_ctx;
memset(task_ctx, 0, sizeof(struct fcoe_task_context));
memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
else
/* Set task type based on DMA direction of command */
if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
task_type = FCOE_TASK_TYPE_READ_INITIATOR;
/* Y Storm context */
task_ctx->ystorm_st_context.expect_first_xfer = 1;
task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len;
/* Check if this is required */
task_ctx->ystorm_st_context.ox_id = io_req->xid;
task_ctx->ystorm_st_context.task_rety_identifier =
io_req->task_retry_identifier;
/* T Storm ag context */
SET_FIELD(task_ctx->tstorm_ag_context.flags0,
TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE);
task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
/* T Storm st context */
SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
1);
task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
task_ctx->tstorm_st_context.read_only.dev_type =
FCOE_TASK_DEV_TYPE_DISK;
task_ctx->tstorm_st_context.read_only.conf_supported = 0;
task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
/* Completion queue for response. */
task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx;
task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size =
io_req->data_xfer_len;
task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val =
lport->e_d_tov;
task_ctx->ustorm_ag_context.global_cq_num = cq_idx;
io_req->fp_idx = cq_idx;
bd_count = bd_tbl->bd_valid;
if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
/* Setup WRITE task */
struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
task_ctx->ystorm_st_context.task_type =
FCOE_TASK_TYPE_WRITE_INITIATOR;
data_desc = &task_ctx->ystorm_st_context.data_desc;
if (io_req->use_slowpath) {
SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
FCOE_SLOW_SGL);
data_desc->slow.base_sgl_addr.lo =
U64_LO(bd_tbl->bd_tbl_dma);
data_desc->slow.base_sgl_addr.hi =
U64_HI(bd_tbl->bd_tbl_dma);
data_desc->slow.remainder_num_sges = bd_count;
data_desc->slow.curr_sge_off = 0;
data_desc->slow.curr_sgl_index = 0;
qedf->slow_sge_ios++;
io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
} else {
SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
(bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
FCOE_MUL_FAST_SGES);
if (bd_count == 1) {
data_desc->single_sge.sge_addr.lo =
fcoe_bd_tbl->sge_addr.lo;
data_desc->single_sge.sge_addr.hi =
fcoe_bd_tbl->sge_addr.hi;
data_desc->single_sge.size =
fcoe_bd_tbl->size;
data_desc->single_sge.is_valid_sge = 0;
qedf->single_sge_ios++;
io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
} else {
data_desc->fast.sgl_start_addr.lo =
U64_LO(bd_tbl->bd_tbl_dma);
data_desc->fast.sgl_start_addr.hi =
U64_HI(bd_tbl->bd_tbl_dma);
data_desc->fast.sgl_byte_offset =
data_desc->fast.sgl_start_addr.lo &
(QEDF_PAGE_SIZE - 1);
if (data_desc->fast.sgl_byte_offset > 0)
QEDF_ERR(&(qedf->dbg_ctx),
"byte_offset=%u for xid=0x%x.\n",
io_req->xid,
data_desc->fast.sgl_byte_offset);
data_desc->fast.task_reuse_cnt =
io_req->reuse_count;
io_req->reuse_count++;
if (io_req->reuse_count == QEDF_MAX_REUSE) {
*ptu_invalidate = 1;
io_req->reuse_count = 0;
}
qedf->fast_sge_ios++;
io_req->sge_type = QEDF_IOREQ_FAST_SGE;
}
}
/* T Storm context */
task_ctx->tstorm_st_context.read_only.task_type =
FCOE_TASK_TYPE_WRITE_INITIATOR;
/* M Storm context */
tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE);
SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE,
tmp_sgl_mode);
} else {
/* Setup READ task */
/* M Storm context */
struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
task_ctx->mstorm_st_context.fp.data_2_trns_rem =
io_req->data_xfer_len;
if (io_req->use_slowpath) {
SET_FIELD(
task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
FCOE_SLOW_SGL);
data_desc->slow.base_sgl_addr.lo =
U64_LO(bd_tbl->bd_tbl_dma);
data_desc->slow.base_sgl_addr.hi =
U64_HI(bd_tbl->bd_tbl_dma);
data_desc->slow.remainder_num_sges =
bd_count;
data_desc->slow.curr_sge_off = 0;
data_desc->slow.curr_sgl_index = 0;
qedf->slow_sge_ios++;
io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
tx_io_size = io_req->data_xfer_len;
} else {
SET_FIELD(
task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
(bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
FCOE_MUL_FAST_SGES);
if (bd_count == 1) {
data_desc->single_sge.sge_addr.lo =
fcoe_bd_tbl->sge_addr.lo;
data_desc->single_sge.sge_addr.hi =
fcoe_bd_tbl->sge_addr.hi;
data_desc->single_sge.size =
fcoe_bd_tbl->size;
data_desc->single_sge.is_valid_sge = 0;
qedf->single_sge_ios++;
io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
} else {
data_desc->fast.sgl_start_addr.lo =
U64_LO(bd_tbl->bd_tbl_dma);
data_desc->fast.sgl_start_addr.hi =
U64_HI(bd_tbl->bd_tbl_dma);
data_desc->fast.sgl_byte_offset = 0;
data_desc->fast.task_reuse_cnt =
io_req->reuse_count;
io_req->reuse_count++;
if (io_req->reuse_count == QEDF_MAX_REUSE) {
*ptu_invalidate = 1;
io_req->reuse_count = 0;
}
qedf->fast_sge_ios++;
io_req->sge_type = QEDF_IOREQ_FAST_SGE;
}
task_type = FCOE_TASK_TYPE_READ_INITIATOR;
rx_io_size = io_req->data_xfer_len;
}
/* Y Storm context */
task_ctx->ystorm_st_context.expect_first_xfer = 0;
task_ctx->ystorm_st_context.task_type =
FCOE_TASK_TYPE_READ_INITIATOR;
/* T Storm context */
task_ctx->tstorm_st_context.read_only.task_type =
FCOE_TASK_TYPE_READ_INITIATOR;
mst_sgl_mode = GET_FIELD(
task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE);
SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
mst_sgl_mode);
}
/* Setup the fields for fcoe_task_params */
io_req->task_params->context = task_ctx;
io_req->task_params->sqe = sqe;
io_req->task_params->task_type = task_type;
io_req->task_params->tx_io_size = tx_io_size;
io_req->task_params->rx_io_size = rx_io_size;
io_req->task_params->conn_cid = fcport->fw_cid;
io_req->task_params->itid = io_req->xid;
io_req->task_params->cq_rss_number = cq_idx;
io_req->task_params->is_tape_device = fcport->dev_type;
/* Fill in information for scatter/gather list */
if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
bd_count = bd_tbl->bd_valid;
io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
io_req->sgl_task_params->sgl_phys_addr.lo =
U64_LO(bd_tbl->bd_tbl_dma);
io_req->sgl_task_params->sgl_phys_addr.hi =
U64_HI(bd_tbl->bd_tbl_dma);
io_req->sgl_task_params->num_sges = bd_count;
io_req->sgl_task_params->total_buffer_size =
scsi_bufflen(io_req->sc_cmd);
io_req->sgl_task_params->small_mid_sge =
io_req->use_slowpath;
}
/* Fill in physical address of sense buffer */
sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
/* fill FCP_CMND IU */
fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque;
qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
/* Swap fcp_cmnd since FC is big endian */
cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
for (i = 0; i < cnt; i++) {
*fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]);
fcp_cmnd++;
tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
}
memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
/* M Storm context - Sense buffer */
task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
U64_LO(io_req->sense_buffer_dma);
task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
U64_HI(io_req->sense_buffer_dma);
init_initiator_rw_fcoe_task(io_req->task_params,
io_req->sgl_task_params,
sense_data_buffer_phys_addr,
io_req->task_retry_identifier, fcp_cmnd);
/* Increment SGL type counters */
if (bd_count == 1) {
qedf->single_sge_ios++;
io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
} else if (io_req->use_slowpath) {
qedf->slow_sge_ios++;
io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
} else {
qedf->fast_sge_ios++;
io_req->sge_type = QEDF_IOREQ_FAST_SGE;
}
}
void qedf_init_mp_task(struct qedf_ioreq *io_req,
struct fcoe_task_context *task_ctx)
struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_rport *fcport = io_req->fcport;
struct qedf_ctx *qedf = io_req->fcport->qedf;
struct fc_frame_header *fc_hdr;
enum fcoe_task_type task_type = 0;
union fcoe_data_desc_ctx *data_desc;
struct fcoe_tx_mid_path_params task_fc_hdr;
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task "
"for cmd_type = %d\n", io_req->cmd_type);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"Initializing MP task for cmd_type=%d\n",
io_req->cmd_type);
qedf->control_requests++;
/* Obtain task_type */
if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) ||
(io_req->cmd_type == QEDF_ELS)) {
task_type = FCOE_TASK_TYPE_MIDPATH;
} else if (io_req->cmd_type == QEDF_ABTS) {
task_type = FCOE_TASK_TYPE_ABTS;
}
memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(task_ctx, 0, sizeof(struct fcoe_task_context));
memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
/* Setup the task from io_req for easy reference */
io_req->task = task_ctx;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n",
task_type);
/* Setup the fields for fcoe_task_params */
io_req->task_params->context = task_ctx;
io_req->task_params->sqe = sqe;
io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
io_req->task_params->tx_io_size = io_req->data_xfer_len;
/* rx_io_size tells the f/w how large a response buffer we have */
io_req->task_params->rx_io_size = PAGE_SIZE;
io_req->task_params->conn_cid = fcport->fw_cid;
io_req->task_params->itid = io_req->xid;
/* Return middle path commands on CQ 0 */
io_req->task_params->cq_rss_number = 0;
io_req->task_params->is_tape_device = fcport->dev_type;
/* YSTORM only */
{
/* Initialize YSTORM task context */
struct fcoe_tx_mid_path_params *task_fc_hdr =
&task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path;
memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
task_ctx->ystorm_st_context.task_rety_identifier =
io_req->task_retry_identifier;
fc_hdr = &(mp_req->req_fc_hdr);
/* Set OX_ID and RX_ID based on driver task id */
fc_hdr->fh_ox_id = io_req->xid;
fc_hdr->fh_rx_id = htons(0xffff);
/* Init SGL parameters */
if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
(task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
data_desc = &task_ctx->ystorm_st_context.data_desc;
data_desc->slow.base_sgl_addr.lo =
U64_LO(mp_req->mp_req_bd_dma);
data_desc->slow.base_sgl_addr.hi =
U64_HI(mp_req->mp_req_bd_dma);
data_desc->slow.remainder_num_sges = 1;
data_desc->slow.curr_sge_off = 0;
data_desc->slow.curr_sgl_index = 0;
}
/* Set up FC header information */
task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
task_fc_hdr.type = fc_hdr->fh_type;
task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
fc_hdr = &(mp_req->req_fc_hdr);
if (task_type == FCOE_TASK_TYPE_MIDPATH) {
fc_hdr->fh_ox_id = io_req->xid;
fc_hdr->fh_rx_id = htons(0xffff);
} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
fc_hdr->fh_rx_id = io_req->xid;
}
/* Set up s/g list parameters for request buffer */
tx_sgl_task_params.sgl = mp_req->mp_req_bd;
tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
tx_sgl_task_params.num_sges = 1;
/* Set PAGE_SIZE for now since sg element is that size ??? */
tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
tx_sgl_task_params.small_mid_sge = 0;
/* Fill FC Header into middle path buffer */
task_fc_hdr->parameter = fc_hdr->fh_parm_offset;
task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl;
task_fc_hdr->type = fc_hdr->fh_type;
task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl;
task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl;
task_fc_hdr->rx_id = fc_hdr->fh_rx_id;
task_fc_hdr->ox_id = fc_hdr->fh_ox_id;
/* Set up s/g list parameters for request buffer */
rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
rx_sgl_task_params.num_sges = 1;
/* Set PAGE_SIZE for now since sg element is that size ??? */
rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
rx_sgl_task_params.small_mid_sge = 0;
task_ctx->ystorm_st_context.data_2_trns_rem =
io_req->data_xfer_len;
task_ctx->ystorm_st_context.task_type = task_type;
}
/* TSTORM ONLY */
{
task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
/* Always send middle-path repsonses on CQ #0 */
task_ctx->tstorm_st_context.read_only.glbl_q_num = 0;
io_req->fp_idx = 0;
SET_FIELD(task_ctx->tstorm_ag_context.flags0,
TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE,
PROTOCOLID_FCOE);
task_ctx->tstorm_st_context.read_only.task_type = task_type;
SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
1);
task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
}
/*
* Last arg is 0 as previous code did not set that we wanted the
* fc header information.
*/
init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
&task_fc_hdr,
&tx_sgl_task_params,
&rx_sgl_task_params, 0);
/* MSTORM only */
{
if (task_type == FCOE_TASK_TYPE_MIDPATH) {
/* Initialize task context */
data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
/* Set cache sges address and length */
data_desc->slow.base_sgl_addr.lo =
U64_LO(mp_req->mp_resp_bd_dma);
data_desc->slow.base_sgl_addr.hi =
U64_HI(mp_req->mp_resp_bd_dma);
data_desc->slow.remainder_num_sges = 1;
data_desc->slow.curr_sge_off = 0;
data_desc->slow.curr_sgl_index = 0;
/*
* Also need to fil in non-fastpath response address
* for middle path commands.
*/
task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
U64_LO(mp_req->mp_resp_bd_dma);
task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
U64_HI(mp_req->mp_resp_bd_dma);
}
}
/* USTORM ONLY */
{
task_ctx->ustorm_ag_context.global_cq_num = 0;
}
/* I/O stats. Middle path commands always use slow SGEs */
qedf->slow_sge_ios++;
io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
/* Midpath requests always consume 1 SGE */
qedf->single_sge_ios++;
}
void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate,
enum fcoe_task_type req_type, u32 offset)
/* Presumed that fcport->rport_lock is held */
u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
struct fcoe_wqe *sqe;
uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
u16 rval;
sqe = &fcport->sq[fcport->sq_prod_idx];
rval = fcport->sq_prod_idx;
/* Adjust ring index */
fcport->sq_prod_idx++;
fcport->fw_sq_prod_idx++;
if (fcport->sq_prod_idx == total_sqe)
fcport->sq_prod_idx = 0;
switch (req_type) {
case FCOE_TASK_TYPE_WRITE_INITIATOR:
case FCOE_TASK_TYPE_READ_INITIATOR:
SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD);
if (ptu_invalidate)
SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1);
break;
case FCOE_TASK_TYPE_MIDPATH:
SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH);
break;
case FCOE_TASK_TYPE_ABTS:
SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
SEND_FCOE_ABTS_REQUEST);
break;
case FCOE_TASK_TYPE_EXCHANGE_CLEANUP:
SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
FCOE_EXCHANGE_CLEANUP);
break;
case FCOE_TASK_TYPE_SEQUENCE_CLEANUP:
SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
FCOE_SEQUENCE_RECOVERY);
/* NOTE: offset param only used for sequence recovery */
sqe->additional_info_union.seq_rec_updated_offset = offset;
break;
case FCOE_TASK_TYPE_UNSOLICITED:
break;
default:
break;
}
sqe->task_id = xid;
/* Make sure SQ data is coherent */
wmb();
return rval;
}
void qedf_ring_doorbell(struct qedf_rport *fcport)
@ -1029,7 +847,8 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
struct fcoe_task_context *task_ctx;
u16 xid;
enum fcoe_task_type req_type = 0;
u32 ptu_invalidate = 0;
struct fcoe_wqe *sqe;
u16 sqe_idx;
/* Initialize rest of io_req fileds */
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
@ -1061,6 +880,16 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
return -EAGAIN;
}
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
kref_put(&io_req->refcount, qedf_release_cmd);
}
/* Obtain free SQE */
sqe_idx = qedf_get_sqe_idx(fcport);
sqe = &fcport->sq[sqe_idx];
memset(sqe, 0, sizeof(struct fcoe_wqe));
/* Get the task context */
task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
if (!task_ctx) {
@ -1070,15 +899,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
return -EINVAL;
}
qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx);
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
kref_put(&io_req->refcount, qedf_release_cmd);
}
/* Obtain free SQ entry */
qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0);
qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
/* Ring doorbell */
qedf_ring_doorbell(fcport);
@ -1661,6 +1482,8 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
u32 r_a_tov = 0;
int rc = 0;
unsigned long flags;
struct fcoe_wqe *sqe;
u16 sqe_idx;
r_a_tov = rdata->r_a_tov;
lport = qedf->lport;
@ -1712,10 +1535,12 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
spin_lock_irqsave(&fcport->rport_lock, flags);
/* Add ABTS to send queue */
qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0);
sqe_idx = qedf_get_sqe_idx(fcport);
sqe = &fcport->sq[sqe_idx];
memset(sqe, 0, sizeof(struct fcoe_wqe));
io_req->task_params->sqe = sqe;
/* Ring doorbell */
init_initiator_abort_fcoe_task(io_req->task_params);
qedf_ring_doorbell(fcport);
spin_unlock_irqrestore(&fcport->rport_lock, flags);
@ -1784,8 +1609,8 @@ void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
int qedf_init_mp_req(struct qedf_ioreq *io_req)
{
struct qedf_mp_req *mp_req;
struct fcoe_sge *mp_req_bd;
struct fcoe_sge *mp_resp_bd;
struct scsi_sge *mp_req_bd;
struct scsi_sge *mp_resp_bd;
struct qedf_ctx *qedf = io_req->fcport->qedf;
dma_addr_t addr;
uint64_t sz;
@ -1819,7 +1644,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
}
/* Allocate and map mp_req_bd and mp_resp_bd */
sz = sizeof(struct fcoe_sge);
sz = sizeof(struct scsi_sge);
mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
&mp_req->mp_req_bd_dma, GFP_KERNEL);
if (!mp_req->mp_req_bd) {
@ -1841,7 +1666,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
mp_req_bd = mp_req->mp_req_bd;
mp_req_bd->sge_addr.lo = U64_LO(addr);
mp_req_bd->sge_addr.hi = U64_HI(addr);
mp_req_bd->size = QEDF_PAGE_SIZE;
mp_req_bd->sge_len = QEDF_PAGE_SIZE;
/*
* MP buffer is either a task mgmt command or an ELS.
@ -1852,7 +1677,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
addr = mp_req->resp_buf_dma;
mp_resp_bd->sge_addr.lo = U64_LO(addr);
mp_resp_bd->sge_addr.hi = U64_HI(addr);
mp_resp_bd->size = QEDF_PAGE_SIZE;
mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
return 0;
}
@ -1895,6 +1720,8 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
int tmo = 0;
int rc = SUCCESS;
unsigned long flags;
struct fcoe_wqe *sqe;
u16 sqe_idx;
fcport = io_req->fcport;
if (!fcport) {
@ -1940,12 +1767,16 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
init_completion(&io_req->tm_done);
/* Obtain free SQ entry */
spin_lock_irqsave(&fcport->rport_lock, flags);
qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0);
/* Ring doorbell */
sqe_idx = qedf_get_sqe_idx(fcport);
sqe = &fcport->sq[sqe_idx];
memset(sqe, 0, sizeof(struct fcoe_wqe));
io_req->task_params->sqe = sqe;
init_initiator_cleanup_fcoe_task(io_req->task_params);
qedf_ring_doorbell(fcport);
spin_unlock_irqrestore(&fcport->rport_lock, flags);
tmo = wait_for_completion_timeout(&io_req->tm_done,
@ -1991,16 +1822,15 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
uint8_t tm_flags)
{
struct qedf_ioreq *io_req;
struct qedf_mp_req *tm_req;
struct fcoe_task_context *task;
struct fc_frame_header *fc_hdr;
struct fcp_cmnd *fcp_cmnd;
struct qedf_ctx *qedf = fcport->qedf;
struct fc_lport *lport = qedf->lport;
int rc = 0;
uint16_t xid;
uint32_t sid, did;
int tmo = 0;
unsigned long flags;
struct fcoe_wqe *sqe;
u16 sqe_idx;
if (!sc_cmd) {
QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
@ -2031,36 +1861,14 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
/* Set the return CPU to be the same as the request one */
io_req->cpu = smp_processor_id();
tm_req = (struct qedf_mp_req *)&(io_req->mp_req);
rc = qedf_init_mp_req(io_req);
if (rc == FAILED) {
QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init "
"failed\n");
kref_put(&io_req->refcount, qedf_release_cmd);
goto reset_tmf_err;
}
/* Set TM flags */
io_req->io_req_flags = 0;
tm_req->tm_flags = tm_flags;
io_req->io_req_flags = QEDF_READ;
io_req->data_xfer_len = 0;
io_req->tm_flags = tm_flags;
/* Default is to return a SCSI command when an error occurs */
io_req->return_scsi_cmd_on_abts = true;
/* Fill FCP_CMND */
qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN);
fcp_cmnd->fc_dl = 0;
/* Fill FC header */
fc_hdr = &(tm_req->req_fc_hdr);
sid = fcport->sid;
did = fcport->rdata->ids.port_id;
__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did,
FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
FC_FC_SEQ_INIT, 0);
/* Obtain exchange id */
xid = io_req->xid;
@ -2069,16 +1877,18 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
/* Initialize task context for this IO request */
task = qedf_get_task_mem(&qedf->tasks, xid);
qedf_init_mp_task(io_req, task);
init_completion(&io_req->tm_done);
/* Obtain free SQ entry */
spin_lock_irqsave(&fcport->rport_lock, flags);
qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
/* Ring doorbell */
sqe_idx = qedf_get_sqe_idx(fcport);
sqe = &fcport->sq[sqe_idx];
memset(sqe, 0, sizeof(struct fcoe_wqe));
qedf_init_task(fcport, lport, io_req, task, sqe);
qedf_ring_doorbell(fcport);
spin_unlock_irqrestore(&fcport->rport_lock, flags);
tmo = wait_for_completion_timeout(&io_req->tm_done,
@ -2162,14 +1972,6 @@ void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req)
{
struct fcoe_cqe_rsp_info *fcp_rsp;
struct fcoe_cqe_midpath_info *mp_info;
/* Get TMF response length from CQE */
mp_info = &cqe->cqe_info.midpath_info;
io_req->mp_req.resp_len = mp_info->data_placement_size;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
"Response len is %d.\n", io_req->mp_req.resp_len);
fcp_rsp = &cqe->cqe_info.rsp_info;
qedf_parse_fcp_rsp(io_req, fcp_rsp);

View File

@ -1,5 +1,5 @@
obj-$(CONFIG_QEDI) := qedi.o
qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
qedi_dbg.o
qedi_dbg.o qedi_fw_api.o
qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,781 @@
/* QLogic iSCSI Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#include <linux/types.h>
#include <asm/byteorder.h>
#include "qedi_hsi.h"
#include <linux/qed/qed_if.h>
#include "qedi_fw_iscsi.h"
#include "qedi_fw_scsi.h"
#define SCSI_NUM_SGES_IN_CACHE 0x4
static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
{
return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
}
static
void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
struct scsi_cached_sges *ctx_data_desc,
struct scsi_sgl_task_params *sgl_task_params)
{
u8 sge_index;
u8 num_sges;
u32 val;
num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ?
SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges;
/* sgl params */
val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
ctx_sgl_params->sgl_addr.lo = val;
val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
ctx_sgl_params->sgl_addr.hi = val;
val = cpu_to_le32(sgl_task_params->total_buffer_size);
ctx_sgl_params->sgl_total_length = val;
ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
for (sge_index = 0; sge_index < num_sges; sge_index++) {
val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
ctx_data_desc->sge[sge_index].sge_addr.lo = val;
val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
ctx_data_desc->sge[sge_index].sge_addr.hi = val;
val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
ctx_data_desc->sge[sge_index].sge_len = val;
}
}
static u32 calc_rw_task_size(struct iscsi_task_params *task_params,
enum iscsi_task_type task_type,
struct scsi_sgl_task_params *sgl_task_params,
struct scsi_dif_task_params *dif_task_params)
{
u32 io_size;
if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
task_type == ISCSI_TASK_TYPE_TARGET_READ)
io_size = task_params->tx_io_size;
else
io_size = task_params->rx_io_size;
if (!io_size)
return 0;
if (!dif_task_params)
return io_size;
return !dif_task_params->dif_on_network ?
io_size : sgl_task_params->total_buffer_size;
}
static void
init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags,
struct scsi_dif_task_params *dif_task_params)
{
if (!dif_task_params)
return;
SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG,
dif_task_params->dif_block_size_log);
SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER,
dif_task_params->dif_on_network ? 1 : 0);
SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE,
dif_task_params->dif_on_host ? 1 : 0);
}
static void init_sqe(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *sgl_task_params,
struct scsi_dif_task_params *dif_task_params,
struct iscsi_common_hdr *pdu_header,
struct scsi_initiator_cmd_params *cmd_params,
enum iscsi_task_type task_type,
bool is_cleanup)
{
if (!task_params->sqe)
return;
memset(task_params->sqe, 0, sizeof(*task_params->sqe));
task_params->sqe->task_id = cpu_to_le16(task_params->itid);
if (is_cleanup) {
SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
ISCSI_WQE_TYPE_TASK_CLEANUP);
return;
}
switch (task_type) {
case ISCSI_TASK_TYPE_INITIATOR_WRITE:
{
u32 buf_size = 0;
u32 num_sges = 0;
init_dif_context_flags(&task_params->sqe->prot_flags,
dif_task_params);
SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
ISCSI_WQE_TYPE_NORMAL);
if (task_params->tx_io_size) {
buf_size = calc_rw_task_size(task_params, task_type,
sgl_task_params,
dif_task_params);
if (scsi_is_slow_sgl(sgl_task_params->num_sges,
sgl_task_params->small_mid_sge))
num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
else
num_sges = min(sgl_task_params->num_sges,
(u16)SCSI_NUM_SGES_SLOW_SGL_THR);
}
SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, num_sges);
SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
buf_size);
if (GET_FIELD(pdu_header->hdr_second_dword,
ISCSI_CMD_HDR_TOTAL_AHS_LEN))
SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CDB_SIZE,
cmd_params->extended_cdb_sge.sge_len);
}
break;
case ISCSI_TASK_TYPE_INITIATOR_READ:
SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
ISCSI_WQE_TYPE_NORMAL);
if (GET_FIELD(pdu_header->hdr_second_dword,
ISCSI_CMD_HDR_TOTAL_AHS_LEN))
SET_FIELD(task_params->sqe->contlen_cdbsize,
ISCSI_WQE_CDB_SIZE,
cmd_params->extended_cdb_sge.sge_len);
break;
case ISCSI_TASK_TYPE_LOGIN_RESPONSE:
case ISCSI_TASK_TYPE_MIDPATH:
{
bool advance_statsn = true;
if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE)
SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
ISCSI_WQE_TYPE_LOGIN);
else
SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
ISCSI_WQE_TYPE_MIDDLE_PATH);
if (task_type == ISCSI_TASK_TYPE_MIDPATH) {
u8 opcode = GET_FIELD(pdu_header->hdr_first_byte,
ISCSI_COMMON_HDR_OPCODE);
if (opcode != ISCSI_OPCODE_TEXT_RESPONSE &&
(opcode != ISCSI_OPCODE_NOP_IN ||
pdu_header->itt == ISCSI_TTT_ALL_ONES))
advance_statsn = false;
}
SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
advance_statsn ? 1 : 0);
if (task_params->tx_io_size) {
SET_FIELD(task_params->sqe->contlen_cdbsize,
ISCSI_WQE_CONT_LEN, task_params->tx_io_size);
if (scsi_is_slow_sgl(sgl_task_params->num_sges,
sgl_task_params->small_mid_sge))
SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
ISCSI_WQE_NUM_SGES_SLOWIO);
else
SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
min(sgl_task_params->num_sges,
(u16)SCSI_NUM_SGES_SLOW_SGL_THR));
}
}
break;
default:
break;
}
}
static void init_default_iscsi_task(struct iscsi_task_params *task_params,
struct data_hdr *pdu_header,
enum iscsi_task_type task_type)
{
struct iscsi_task_context *context;
u16 index;
u32 val;
context = task_params->context;
memset(context, 0, sizeof(*context));
for (index = 0; index <
ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
index++) {
val = cpu_to_le32(pdu_header->data[index]);
context->ystorm_st_context.pdu_hdr.data.data[index] = val;
}
context->mstorm_st_context.task_type = task_type;
context->mstorm_ag_context.task_cid =
cpu_to_le16(task_params->conn_icid);
SET_FIELD(context->ustorm_ag_context.flags1,
USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
context->ustorm_st_context.task_type = task_type;
context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
}
static
void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
struct scsi_initiator_cmd_params *cmd)
{
union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr;
u32 val;
if (!cmd->extended_cdb_sge.sge_len)
return;
SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword,
ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE,
cmd->extended_cdb_sge.sge_len);
val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo);
ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val;
val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi);
ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val;
val = cpu_to_le32(cmd->extended_cdb_sge.sge_len);
ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len = val;
}
static
void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
u32 remaining_recv_len,
u32 expected_data_transfer_len,
u8 num_sges, bool tx_dif_conn_err_en)
{
u32 val;
ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len);
ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
val = cpu_to_le32(expected_data_transfer_len);
ustorm_st_cxt->exp_data_transfer_len = val;
SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
SET_FIELD(ustorm_ag_cxt->flags2,
USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
tx_dif_conn_err_en ? 1 : 0);
}
static
void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
struct iscsi_conn_params *conn_params,
enum iscsi_task_type task_type,
u32 task_size,
u32 exp_data_transfer_len,
u8 total_ahs_length)
{
u32 max_unsolicited_data = 0, val;
if (total_ahs_length &&
(task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
task_type == ISCSI_TASK_TYPE_INITIATOR_READ))
SET_FIELD(context->ustorm_st_context.flags2,
USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1);
switch (task_type) {
case ISCSI_TASK_TYPE_INITIATOR_WRITE:
if (!conn_params->initial_r2t)
max_unsolicited_data = conn_params->first_burst_length;
else if (conn_params->immediate_data)
max_unsolicited_data =
min(conn_params->first_burst_length,
conn_params->max_send_pdu_length);
context->ustorm_ag_context.exp_data_acked =
cpu_to_le32(total_ahs_length == 0 ?
min(exp_data_transfer_len,
max_unsolicited_data) :
((u32)(total_ahs_length +
ISCSI_AHS_CNTL_SIZE)));
break;
case ISCSI_TASK_TYPE_TARGET_READ:
val = cpu_to_le32(exp_data_transfer_len);
context->ustorm_ag_context.exp_data_acked = val;
break;
case ISCSI_TASK_TYPE_INITIATOR_READ:
context->ustorm_ag_context.exp_data_acked =
cpu_to_le32((total_ahs_length == 0 ? 0 :
total_ahs_length +
ISCSI_AHS_CNTL_SIZE));
break;
case ISCSI_TASK_TYPE_TARGET_WRITE:
val = cpu_to_le32(task_size);
context->ustorm_ag_context.exp_cont_len = val;
break;
default:
break;
}
}
static
void init_rtdif_task_context(struct rdif_task_context *rdif_context,
struct tdif_task_context *tdif_context,
struct scsi_dif_task_params *dif_task_params,
enum iscsi_task_type task_type)
{
u32 val;
if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host)
return;
if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE ||
task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
rdif_context->app_tag_value =
cpu_to_le16(dif_task_params->application_tag);
rdif_context->partial_crc_value = cpu_to_le16(0xffff);
val = cpu_to_le32(dif_task_params->initial_ref_tag);
rdif_context->initial_ref_tag = val;
rdif_context->app_tag_mask =
cpu_to_le16(dif_task_params->application_tag_mask);
SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
dif_task_params->crc_seed ? 1 : 0);
SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
dif_task_params->host_guard_type);
SET_FIELD(rdif_context->flags0,
RDIF_TASK_CONTEXT_PROTECTIONTYPE,
dif_task_params->protection_type);
SET_FIELD(rdif_context->flags0,
RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
SET_FIELD(rdif_context->flags0,
RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
dif_task_params->keep_ref_tag_const ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
(dif_task_params->validate_app_tag &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_VALIDATEGUARD,
(dif_task_params->validate_guard &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_VALIDATEREFTAG,
(dif_task_params->validate_ref_tag &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_HOSTINTERFACE,
dif_task_params->dif_on_host ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_NETWORKINTERFACE,
dif_task_params->dif_on_network ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDGUARD,
dif_task_params->forward_guard ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDAPPTAG,
dif_task_params->forward_app_tag ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDREFTAG,
dif_task_params->forward_ref_tag ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
dif_task_params->forward_app_tag_with_mask ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_INTERVALSIZE,
dif_task_params->dif_block_size_log - 9);
SET_FIELD(rdif_context->state,
RDIF_TASK_CONTEXT_REFTAGMASK,
dif_task_params->ref_tag_mask);
SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
dif_task_params->ignore_app_tag);
}
if (task_type == ISCSI_TASK_TYPE_TARGET_READ ||
task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
tdif_context->app_tag_value =
cpu_to_le16(dif_task_params->application_tag);
tdif_context->partial_crc_valueB =
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
tdif_context->partial_crc_value_a =
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED,
dif_task_params->crc_seed ? 1 : 0);
SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_SETERRORWITHEOP,
dif_task_params->tx_dif_conn_err_en ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
dif_task_params->forward_guard ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
dif_task_params->forward_app_tag ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
dif_task_params->forward_ref_tag ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
dif_task_params->dif_block_size_log - 9);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
dif_task_params->dif_on_host ? 1 : 0);
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_NETWORKINTERFACE,
dif_task_params->dif_on_network ? 1 : 0);
val = cpu_to_le32(dif_task_params->initial_ref_tag);
tdif_context->initial_ref_tag = val;
tdif_context->app_tag_mask =
cpu_to_le16(dif_task_params->application_tag_mask);
SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
dif_task_params->host_guard_type);
SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_PROTECTIONTYPE,
dif_task_params->protection_type);
SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
dif_task_params->keep_ref_tag_const ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
(dif_task_params->validate_guard &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
(dif_task_params->validate_app_tag &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_VALIDATEREFTAG,
(dif_task_params->validate_ref_tag &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
dif_task_params->forward_app_tag_with_mask ? 1 : 0);
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_REFTAGMASK,
dif_task_params->ref_tag_mask);
SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_IGNOREAPPTAG,
dif_task_params->ignore_app_tag ? 1 : 0);
}
}
static void set_local_completion_context(struct iscsi_task_context *context)
{
SET_FIELD(context->ystorm_st_context.state.flags,
YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
SET_FIELD(context->ustorm_st_context.flags,
USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
}
static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
enum iscsi_task_type task_type,
struct iscsi_conn_params *conn_params,
struct iscsi_common_hdr *pdu_header,
struct scsi_sgl_task_params *sgl_task_params,
struct scsi_initiator_cmd_params *cmd_params,
struct scsi_dif_task_params *dif_task_params)
{
u32 exp_data_transfer_len = conn_params->max_burst_length;
struct iscsi_task_context *cxt;
bool slow_io = false;
u32 task_size, val;
u8 num_sges = 0;
task_size = calc_rw_task_size(task_params, task_type, sgl_task_params,
dif_task_params);
init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header,
task_type);
cxt = task_params->context;
val = cpu_to_le32(task_size);
cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
cmd_params);
val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
cxt->mstorm_st_context.sense_db.lo = val;
val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
cxt->mstorm_st_context.sense_db.hi = val;
if (task_params->tx_io_size) {
init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
dif_task_params);
init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
&cxt->ystorm_st_context.state.data_desc,
sgl_task_params);
slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges,
sgl_task_params->small_mid_sge);
num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges,
(u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
ISCSI_WQE_NUM_SGES_SLOWIO;
if (slow_io) {
SET_FIELD(cxt->ystorm_st_context.state.flags,
YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1);
}
} else if (task_params->rx_io_size) {
init_dif_context_flags(&cxt->mstorm_st_context.dif_flags,
dif_task_params);
init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
&cxt->mstorm_st_context.data_desc,
sgl_task_params);
num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges,
sgl_task_params->small_mid_sge) ?
min_t(u16, sgl_task_params->num_sges,
(u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
ISCSI_WQE_NUM_SGES_SLOWIO;
cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
}
if (exp_data_transfer_len > task_size ||
task_type != ISCSI_TASK_TYPE_TARGET_WRITE)
exp_data_transfer_len = task_size;
init_ustorm_task_contexts(&task_params->context->ustorm_st_context,
&task_params->context->ustorm_ag_context,
task_size, exp_data_transfer_len, num_sges,
dif_task_params ?
dif_task_params->tx_dif_conn_err_en : false);
set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params,
task_type, task_size,
exp_data_transfer_len,
GET_FIELD(pdu_header->hdr_second_dword,
ISCSI_CMD_HDR_TOTAL_AHS_LEN));
if (dif_task_params)
init_rtdif_task_context(&task_params->context->rdif_context,
&task_params->context->tdif_context,
dif_task_params, task_type);
init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header,
cmd_params, task_type, false);
return 0;
}
/* Dispatch an initiator SCSI command to the common R/W task initializer.
 * The transfer direction is taken from the R/W bits in the command PDU:
 * a write consumes the Tx SGL, a read consumes the Rx SGL.
 * Returns 0 on success, -1 if neither direction bit is set.
 */
int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
				 struct iscsi_conn_params *conn_params,
				 struct scsi_initiator_cmd_params *cmd_params,
				 struct iscsi_cmd_hdr *cmd_header,
				 struct scsi_sgl_task_params *tx_sgl_params,
				 struct scsi_sgl_task_params *rx_sgl_params,
				 struct scsi_dif_task_params *dif_task_params)
{
	if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE))
		return init_rw_iscsi_task(task_params,
					  ISCSI_TASK_TYPE_INITIATOR_WRITE,
					  conn_params,
					  (struct iscsi_common_hdr *)cmd_header,
					  tx_sgl_params, cmd_params,
					  dif_task_params);

	if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ))
		return init_rw_iscsi_task(task_params,
					  ISCSI_TASK_TYPE_INITIATOR_READ,
					  conn_params,
					  (struct iscsi_common_hdr *)cmd_header,
					  rx_sgl_params, cmd_params,
					  dif_task_params);

	/* Neither the W nor the R flag is set - not a data-transfer command. */
	return -1;
}
int init_initiator_login_request_task(struct iscsi_task_params *task_params,
struct iscsi_login_req_hdr *login_header,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
struct iscsi_task_context *cxt;
cxt = task_params->context;
init_default_iscsi_task(task_params,
(struct data_hdr *)login_header,
ISCSI_TASK_TYPE_MIDPATH);
init_ustorm_task_contexts(&cxt->ustorm_st_context,
&cxt->ustorm_ag_context,
task_params->rx_io_size ?
rx_params->total_buffer_size : 0,
task_params->tx_io_size ?
tx_params->total_buffer_size : 0, 0,
0);
if (task_params->tx_io_size)
init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
&cxt->ystorm_st_context.state.data_desc,
tx_params);
if (task_params->rx_io_size)
init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
&cxt->mstorm_st_context.data_desc,
rx_params);
cxt->mstorm_st_context.rem_task_size =
cpu_to_le32(task_params->rx_io_size ?
rx_params->total_buffer_size : 0);
init_sqe(task_params, tx_params, NULL,
(struct iscsi_common_hdr *)login_header, NULL,
ISCSI_TASK_TYPE_MIDPATH, false);
return 0;
}
int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
struct iscsi_nop_out_hdr *nop_out_pdu_header,
struct scsi_sgl_task_params *tx_sgl_task_params,
struct scsi_sgl_task_params *rx_sgl_task_params)
{
struct iscsi_task_context *cxt;
cxt = task_params->context;
init_default_iscsi_task(task_params,
(struct data_hdr *)nop_out_pdu_header,
ISCSI_TASK_TYPE_MIDPATH);
if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES)
set_local_completion_context(task_params->context);
if (task_params->tx_io_size)
init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
&cxt->ystorm_st_context.state.data_desc,
tx_sgl_task_params);
if (task_params->rx_io_size)
init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
&cxt->mstorm_st_context.data_desc,
rx_sgl_task_params);
init_ustorm_task_contexts(&cxt->ustorm_st_context,
&cxt->ustorm_ag_context,
task_params->rx_io_size ?
rx_sgl_task_params->total_buffer_size : 0,
task_params->tx_io_size ?
tx_sgl_task_params->total_buffer_size : 0,
0, 0);
cxt->mstorm_st_context.rem_task_size =
cpu_to_le32(task_params->rx_io_size ?
rx_sgl_task_params->total_buffer_size :
0);
init_sqe(task_params, tx_sgl_task_params, NULL,
(struct iscsi_common_hdr *)nop_out_pdu_header, NULL,
ISCSI_TASK_TYPE_MIDPATH, false);
return 0;
}
int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
struct iscsi_logout_req_hdr *logout_hdr,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
struct iscsi_task_context *cxt;
cxt = task_params->context;
init_default_iscsi_task(task_params,
(struct data_hdr *)logout_hdr,
ISCSI_TASK_TYPE_MIDPATH);
if (task_params->tx_io_size)
init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
&cxt->ystorm_st_context.state.data_desc,
tx_params);
if (task_params->rx_io_size)
init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
&cxt->mstorm_st_context.data_desc,
rx_params);
init_ustorm_task_contexts(&cxt->ustorm_st_context,
&cxt->ustorm_ag_context,
task_params->rx_io_size ?
rx_params->total_buffer_size : 0,
task_params->tx_io_size ?
tx_params->total_buffer_size : 0,
0, 0);
cxt->mstorm_st_context.rem_task_size =
cpu_to_le32(task_params->rx_io_size ?
rx_params->total_buffer_size : 0);
init_sqe(task_params, tx_params, NULL,
(struct iscsi_common_hdr *)logout_hdr, NULL,
ISCSI_TASK_TYPE_MIDPATH, false);
return 0;
}
/* Initialize the task context and SQE for an initiator Task Management
 * Function request. TMF carries no data, so no SGLs are programmed.
 * Always returns 0.
 */
int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
				    struct iscsi_tmf_request_hdr *tmf_header)
{
	struct iscsi_common_hdr *hdr = (struct iscsi_common_hdr *)tmf_header;

	init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header,
				ISCSI_TASK_TYPE_MIDPATH);

	init_sqe(task_params, NULL, NULL, hdr, NULL,
		 ISCSI_TASK_TYPE_MIDPATH, false);

	return 0;
}
int init_initiator_text_request_task(struct iscsi_task_params *task_params,
struct iscsi_text_request_hdr *text_header,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
struct iscsi_task_context *cxt;
cxt = task_params->context;
init_default_iscsi_task(task_params,
(struct data_hdr *)text_header,
ISCSI_TASK_TYPE_MIDPATH);
if (task_params->tx_io_size)
init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
&cxt->ystorm_st_context.state.data_desc,
tx_params);
if (task_params->rx_io_size)
init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
&cxt->mstorm_st_context.data_desc,
rx_params);
cxt->mstorm_st_context.rem_task_size =
cpu_to_le32(task_params->rx_io_size ?
rx_params->total_buffer_size : 0);
init_ustorm_task_contexts(&cxt->ustorm_st_context,
&cxt->ustorm_ag_context,
task_params->rx_io_size ?
rx_params->total_buffer_size : 0,
task_params->tx_io_size ?
tx_params->total_buffer_size : 0, 0, 0);
init_sqe(task_params, tx_params, NULL,
(struct iscsi_common_hdr *)text_header, NULL,
ISCSI_TASK_TYPE_MIDPATH, false);
return 0;
}
/* Build a cleanup SQE for an outstanding task. Only the SQE is written
 * (init_sqe() with the cleanup flag set); no task context is touched and
 * no PDU header or SGLs are involved. Always returns 0.
 */
int init_cleanup_task(struct iscsi_task_params *task_params)
{
	init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH,
		 true);
	return 0;
}

View File

@ -0,0 +1,117 @@
/*
 * QLogic iSCSI Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#ifndef _QEDI_FW_ISCSI_H_
#define _QEDI_FW_ISCSI_H_

#include "qedi_fw_scsi.h"

/* Everything needed to build one firmware task: the task context and SQE
 * to be filled in, the Tx/Rx I/O sizes (0 means no data in that direction),
 * and the identifiers of the owning connection and task.
 */
struct iscsi_task_params {
	struct iscsi_task_context *context;
	struct iscsi_wqe *sqe;
	u32 tx_io_size;
	u32 rx_io_size;
	u16 conn_icid;
	u16 itid;
	u8 cq_rss_number;
};

/* Negotiated per-connection iSCSI parameters that influence task-context
 * initialization (burst/PDU lengths, InitialR2T, ImmediateData).
 */
struct iscsi_conn_params {
	u32 first_burst_length;
	u32 max_send_pdu_length;
	u32 max_burst_length;
	bool initial_r2t;
	bool immediate_data;
};

/* @brief init_initiator_rw_iscsi_task - initializes an iSCSI Initiator
 * Read or Write task context; the direction is taken from the R/W bits
 * in the command PDU header.
 *
 * @param task_params - Pointer to task parameters struct
 * @param conn_params - Connection Parameters
 * @param cmd_params - command specific parameters
 * @param cmd_pdu_header - PDU Header Parameters
 * @param tx_sgl_params - Pointer to Tx SGL task params (used for writes)
 * @param rx_sgl_params - Pointer to Rx SGL task params (used for reads)
 * @param dif_task_params - Pointer to DIF parameters struct (may be NULL)
 */
int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
				 struct iscsi_conn_params *conn_params,
				 struct scsi_initiator_cmd_params *cmd_params,
				 struct iscsi_cmd_hdr *cmd_pdu_header,
				 struct scsi_sgl_task_params *tx_sgl_params,
				 struct scsi_sgl_task_params *rx_sgl_params,
				 struct scsi_dif_task_params *dif_task_params);

/* @brief init_initiator_login_request_task - initializes iSCSI Initiator Login
 * Request task context.
 *
 * @param task_params - Pointer to task parameters struct
 * @param login_header - PDU Header Parameters
 * @param tx_params - Pointer to Tx SGL task params
 * @param rx_params - Pointer to Rx SGL task params
 */
int init_initiator_login_request_task(struct iscsi_task_params *task_params,
				      struct iscsi_login_req_hdr *login_header,
				      struct scsi_sgl_task_params *tx_params,
				      struct scsi_sgl_task_params *rx_params);

/* @brief init_initiator_nop_out_task - initializes iSCSI Initiator NOP Out
 * task context.
 *
 * @param nop_out_pdu_header - PDU Header Parameters
 * @param tx_sgl_params - Pointer to Tx SGL task params
 * @param rx_sgl_params - Pointer to Rx SGL task params
 */
int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
				struct iscsi_nop_out_hdr *nop_out_pdu_header,
				struct scsi_sgl_task_params *tx_sgl_params,
				struct scsi_sgl_task_params *rx_sgl_params);

/* @brief init_initiator_logout_request_task - initializes iSCSI Initiator
 * Logout Request task context.
 *
 * @param task_params - Pointer to task parameters struct
 * @param logout_hdr - PDU Header Parameters
 * @param tx_params - Pointer to Tx SGL task params
 * @param rx_params - Pointer to Rx SGL task params
 */
int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
				       struct iscsi_logout_req_hdr *logout_hdr,
				       struct scsi_sgl_task_params *tx_params,
				       struct scsi_sgl_task_params *rx_params);

/* @brief init_initiator_tmf_request_task - initializes iSCSI Initiator TMF
 * task context.
 *
 * @param task_params - Pointer to task parameters struct
 * @param tmf_header - PDU Header Parameters
 */
int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
				    struct iscsi_tmf_request_hdr *tmf_header);

/* @brief init_initiator_text_request_task - initializes iSCSI Initiator Text
 * Request task context.
 *
 * @param task_params - Pointer to task parameters struct
 * @param text_header - PDU Header Parameters
 * @param tx_params - Pointer to Tx SGL task params
 * @param rx_params - Pointer to Rx SGL task params
 */
int init_initiator_text_request_task(struct iscsi_task_params *task_params,
				     struct iscsi_text_request_hdr *text_header,
				     struct scsi_sgl_task_params *tx_params,
				     struct scsi_sgl_task_params *rx_params);

/* @brief init_cleanup_task - initializes a Cleanup task (SQE only;
 * no task context is written).
 *
 * @param task_params - Pointer to task parameters struct
 */
int init_cleanup_task(struct iscsi_task_params *task_params);
#endif

View File

@ -0,0 +1,55 @@
/*
 * QLogic iSCSI Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#ifndef _QEDI_FW_SCSI_H_
#define _QEDI_FW_SCSI_H_

#include <linux/types.h>
#include <asm/byteorder.h>
#include "qedi_hsi.h"
#include <linux/qed/qed_if.h>

/* Scatter-gather list description handed to the firmware task init code.
 * small_mid_sge feeds the slow-SGL decision (see scsi_is_slow_sgl()):
 * presumably set when a non-terminal SGE is below the fast-path minimum
 * size - confirm against the HSI definition.
 */
struct scsi_sgl_task_params {
	struct scsi_sge *sgl;
	struct regpair sgl_phys_addr;
	u32 total_buffer_size;
	u16 num_sges;
	bool small_mid_sge;
};

/* T10 DIF/DIX protection parameters for a task: reference/application tag
 * handling, validation and forwarding controls, and host/network placement
 * of the protection data.
 */
struct scsi_dif_task_params {
	u32 initial_ref_tag;
	bool initial_ref_tag_is_valid;
	u16 application_tag;
	u16 application_tag_mask;
	u16 dif_block_size_log;
	bool dif_on_network;
	bool dif_on_host;
	u8 host_guard_type;
	u8 protection_type;
	u8 ref_tag_mask;
	bool crc_seed;
	bool tx_dif_conn_err_en;
	bool ignore_app_tag;
	bool keep_ref_tag_const;
	bool validate_guard;
	bool validate_app_tag;
	bool validate_ref_tag;
	bool forward_guard;
	bool forward_app_tag;
	bool forward_ref_tag;
	bool forward_app_tag_with_mask;
	bool forward_ref_tag_with_mask;
};

/* Initiator command extras: the SGE describing an extended CDB and the
 * physical address of the sense-data buffer.
 */
struct scsi_initiator_cmd_params {
	struct scsi_sge extended_cdb_sge;
	struct regpair sense_data_buffer_phys_addr;
};
#endif

View File

@ -175,7 +175,7 @@ static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
if (cmd->io_tbl.sge_tbl)
dma_free_coherent(&qedi->pdev->dev,
QEDI_ISCSI_MAX_BDS_PER_CMD *
sizeof(struct iscsi_sge),
sizeof(struct scsi_sge),
cmd->io_tbl.sge_tbl,
cmd->io_tbl.sge_tbl_dma);
@ -191,7 +191,7 @@ static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
struct qedi_cmd *cmd)
{
struct qedi_io_bdt *io = &cmd->io_tbl;
struct iscsi_sge *sge;
struct scsi_sge *sge;
io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
QEDI_ISCSI_MAX_BDS_PER_CMD *
@ -708,22 +708,20 @@ static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
{
struct iscsi_sge *bd_tbl;
struct scsi_sge *bd_tbl;
bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
bd_tbl->sge_addr.hi =
(u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
qedi_conn->gen_pdu.req_buf;
bd_tbl->reserved0 = 0;
bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
bd_tbl->sge_addr.hi =
(u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
bd_tbl->reserved0 = 0;
}
static int qedi_iscsi_send_generic_request(struct iscsi_task *task)

View File

@ -102,7 +102,7 @@ struct qedi_endpoint {
#define QEDI_SQ_WQES_MIN 16
struct qedi_io_bdt {
struct iscsi_sge *sge_tbl;
struct scsi_sge *sge_tbl;
dma_addr_t sge_tbl_dma;
u16 sge_valid;
};

View File

@ -7,8 +7,8 @@
* this source tree.
*/
#define QEDI_MODULE_VERSION "8.10.3.0"
#define QEDI_MODULE_VERSION "8.10.4.0"
#define QEDI_DRIVER_MAJOR_VER 8
#define QEDI_DRIVER_MINOR_VER 10
#define QEDI_DRIVER_REV_VER 3
#define QEDI_DRIVER_REV_VER 4
#define QEDI_DRIVER_ENG_VER 0

View File

@ -100,8 +100,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 10
#define FW_REVISION_VERSION 10
#define FW_MINOR_VERSION 15
#define FW_REVISION_VERSION 3
#define FW_ENGINEERING_VERSION 0
/***********************/
@ -187,6 +187,9 @@
/* DEMS */
#define DQ_DEMS_LEGACY 0
#define DQ_DEMS_TOE_MORE_TO_SEND 3
#define DQ_DEMS_TOE_LOCAL_ADV_WND 4
#define DQ_DEMS_ROCE_CQ_CONS 7
/* XCM agg val selection */
#define DQ_XCM_AGG_VAL_SEL_WORD2 0
@ -214,6 +217,9 @@
#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
@ -269,6 +275,8 @@
#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
/* UCM agg counter flag selection (HW) */
#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@ -285,6 +293,9 @@
#define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
#define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF3)
#define DQ_UCM_TOE_SLOW_PATH_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
#define DQ_UCM_TOE_DQ_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
/* TCM agg counter flag selection (HW) */
#define DQ_TCM_AGG_FLG_SHIFT_CF0 0
@ -301,6 +312,9 @@
#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
/* PWM address mapping */
#define DQ_PWM_OFFSET_DPM_BASE 0x0
@ -689,6 +703,16 @@ struct iscsi_eqe_data {
#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
};
struct rdma_eqe_destroy_qp {
__le32 cid;
u8 reserved[4];
};
union rdma_eqe_data {
struct regpair async_handle;
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
};
struct malicious_vf_eqe_data {
u8 vf_id;
u8 err_id;
@ -705,9 +729,9 @@ union event_ring_data {
u8 bytes[8];
struct vf_pf_channel_eqe_data vf_pf_channel;
struct iscsi_eqe_data iscsi_info;
union rdma_eqe_data rdma_data;
struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
struct regpair roce_handle;
};
/* Event Ring Entry */

View File

@ -49,6 +49,9 @@
#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
#define ETH_RX_NUM_NEXT_PAGE_BDS 2
#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255

View File

@ -109,13 +109,6 @@ struct fcoe_conn_terminate_ramrod_data {
struct regpair terminate_params_addr;
};
struct fcoe_fast_sgl_ctx {
struct regpair sgl_start_addr;
__le32 sgl_byte_offset;
__le16 task_reuse_cnt;
__le16 init_offset_in_first_sge;
};
struct fcoe_slow_sgl_ctx {
struct regpair base_sgl_addr;
__le16 curr_sge_off;
@ -124,23 +117,16 @@ struct fcoe_slow_sgl_ctx {
__le16 reserved;
};
struct fcoe_sge {
struct regpair sge_addr;
__le16 size;
__le16 reserved0;
u8 reserved1[3];
u8 is_valid_sge;
};
union fcoe_data_desc_ctx {
struct fcoe_fast_sgl_ctx fast;
struct fcoe_slow_sgl_ctx slow;
struct fcoe_sge single_sge;
};
union fcoe_dix_desc_ctx {
struct fcoe_slow_sgl_ctx dix_sgl;
struct fcoe_sge cached_dix_sge;
struct scsi_sge cached_dix_sge;
};
struct fcoe_fast_sgl_ctx {
struct regpair sgl_start_addr;
__le32 sgl_byte_offset;
__le16 task_reuse_cnt;
__le16 init_offset_in_first_sge;
};
struct fcoe_fcp_cmd_payload {
@ -172,57 +158,6 @@ enum fcoe_mode_type {
MAX_FCOE_MODE_TYPE
};
struct fcoe_mstorm_fcoe_task_st_ctx_fp {
__le16 flags;
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK 0x7FFF
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT 0
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK 0x1
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15
__le16 difDataResidue;
__le16 parent_id;
__le16 single_sge_saved_offset;
__le32 data_2_trns_rem;
__le32 offset_in_io;
union fcoe_dix_desc_ctx dix_desc;
union fcoe_data_desc_ctx data_desc;
};
struct fcoe_mstorm_fcoe_task_st_ctx_non_fp {
__le16 flags;
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK 0x3
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT 0
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK 0x1
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT 2
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK 0x1
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT 3
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK 0xF
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT 4
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK 0x3
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT 8
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK 0x1
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT 10
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK 0x1
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK 0x1
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT 12
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK 0x1
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT 13
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK 0x1
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT 14
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK 0x1
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT 15
u8 tx_rx_sgl_mode;
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK 0x7
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT 0
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK 0x7
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT 3
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK 0x3
#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT 6
u8 rsrv2;
__le32 num_prm_zero_read;
struct regpair rsp_buf_addr;
};
struct fcoe_rx_stat {
struct regpair fcoe_rx_byte_cnt;
struct regpair fcoe_rx_data_pkt_cnt;
@ -236,16 +171,6 @@ struct fcoe_rx_stat {
__le32 rsrv;
};
enum fcoe_sgl_mode {
FCOE_SLOW_SGL,
FCOE_SINGLE_FAST_SGE,
FCOE_2_FAST_SGE,
FCOE_3_FAST_SGE,
FCOE_4_FAST_SGE,
FCOE_MUL_FAST_SGES,
MAX_FCOE_SGL_MODE
};
struct fcoe_stat_ramrod_data {
struct regpair stat_params_addr;
};
@ -328,22 +253,24 @@ union fcoe_tx_info_union_ctx {
struct ystorm_fcoe_task_st_ctx {
u8 task_type;
u8 sgl_mode;
#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x7
#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x1F
#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 3
#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F
#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1
u8 cached_dix_sge;
u8 expect_first_xfer;
__le32 num_pbf_zero_write;
union protection_info_union_ctx protection_info_union;
__le32 data_2_trns_rem;
struct scsi_sgl_params sgl_params;
u8 reserved1[12];
union fcoe_tx_info_union_ctx tx_info_union;
union fcoe_dix_desc_ctx dix_desc;
union fcoe_data_desc_ctx data_desc;
struct scsi_cached_sges data_desc;
__le16 ox_id;
__le16 rx_id;
__le32 task_rety_identifier;
__le32 reserved1[2];
u8 reserved2[8];
};
struct ystorm_fcoe_task_ag_ctx {
@ -484,22 +411,22 @@ struct tstorm_fcoe_task_ag_ctx {
struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
__le16 flags;
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x7
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 3
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 4
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 5
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 7
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 8
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0x3F
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 10
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
__le16 seq_cnt;
u8 seq_id;
u8 ooo_rx_seq_id;
@ -582,8 +509,34 @@ struct mstorm_fcoe_task_ag_ctx {
};
struct mstorm_fcoe_task_st_ctx {
struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp;
struct fcoe_mstorm_fcoe_task_st_ctx_fp fp;
struct regpair rsp_buf_addr;
__le32 rsrv[2];
struct scsi_sgl_params sgl_params;
__le32 data_2_trns_rem;
__le32 data_buffer_offset;
__le16 parent_id;
__le16 flags;
#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF
#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0
#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3
#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4
#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1
#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6
#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1
#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3
#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8
#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1
#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11
#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1
#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12
#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13
#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3
#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14
struct scsi_cached_sges data_desc;
};
struct ustorm_fcoe_task_ag_ctx {
@ -646,6 +599,7 @@ struct ustorm_fcoe_task_ag_ctx {
struct fcoe_task_context {
struct ystorm_fcoe_task_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct tdif_task_context tdif_context;
struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
@ -668,20 +622,20 @@ struct fcoe_tx_stat {
struct fcoe_wqe {
__le16 task_id;
__le16 flags;
#define FCOE_WQE_REQ_TYPE_MASK 0xF
#define FCOE_WQE_REQ_TYPE_SHIFT 0
#define FCOE_WQE_SGL_MODE_MASK 0x7
#define FCOE_WQE_SGL_MODE_SHIFT 4
#define FCOE_WQE_CONTINUATION_MASK 0x1
#define FCOE_WQE_CONTINUATION_SHIFT 7
#define FCOE_WQE_INVALIDATE_PTU_MASK 0x1
#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8
#define FCOE_WQE_SUPER_IO_MASK 0x1
#define FCOE_WQE_SUPER_IO_SHIFT 9
#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 10
#define FCOE_WQE_RESERVED0_MASK 0x1F
#define FCOE_WQE_RESERVED0_SHIFT 11
#define FCOE_WQE_REQ_TYPE_MASK 0xF
#define FCOE_WQE_REQ_TYPE_SHIFT 0
#define FCOE_WQE_SGL_MODE_MASK 0x1
#define FCOE_WQE_SGL_MODE_SHIFT 4
#define FCOE_WQE_CONTINUATION_MASK 0x1
#define FCOE_WQE_CONTINUATION_SHIFT 5
#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
#define FCOE_WQE_RESERVED_MASK 0x1
#define FCOE_WQE_RESERVED_SHIFT 7
#define FCOE_WQE_NUM_SGES_MASK 0xF
#define FCOE_WQE_NUM_SGES_SHIFT 8
#define FCOE_WQE_RESERVED1_MASK 0xF
#define FCOE_WQE_RESERVED1_SHIFT 12
union fcoe_additional_info_union additional_info_union;
};

View File

@ -39,17 +39,9 @@
/* iSCSI HSI constants */
#define ISCSI_DEFAULT_MTU (1500)
/* Current iSCSI HSI version number composed of two fields (16 bit) */
#define ISCSI_HSI_MAJOR_VERSION (0)
#define ISCSI_HSI_MINOR_VERSION (0)
/* KWQ (kernel work queue) layer codes */
#define ISCSI_SLOW_PATH_LAYER_CODE (6)
/* CQE completion status */
#define ISCSI_EQE_COMPLETION_SUCCESS (0x0)
#define ISCSI_EQE_RST_CONN_RCVD (0x1)
/* iSCSI parameter defaults */
#define ISCSI_DEFAULT_HEADER_DIGEST (0)
#define ISCSI_DEFAULT_DATA_DIGEST (0)
@ -68,6 +60,10 @@
#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
#define ISCSI_AHS_CNTL_SIZE 4
#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf)
/* iSCSI reserved params */
#define ISCSI_ITT_ALL_ONES (0xffffffff)
#define ISCSI_TTT_ALL_ONES (0xffffffff)
@ -173,19 +169,6 @@ struct iscsi_async_msg_hdr {
__le32 reserved7;
};
struct iscsi_sge {
struct regpair sge_addr;
__le16 sge_len;
__le16 reserved0;
__le32 reserved1;
};
struct iscsi_cached_sge_ctx {
struct iscsi_sge sge;
struct regpair reserved;
__le32 dsgl_curr_offset[2];
};
struct iscsi_cmd_hdr {
__le16 reserved1;
u8 flags_attr;
@ -229,8 +212,13 @@ struct iscsi_common_hdr {
#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
__le32 lun_reserved[4];
__le32 data[6];
struct regpair lun_reserved;
__le32 itt;
__le32 ttt;
__le32 cmdstat_sn;
__le32 exp_statcmd_sn;
__le32 max_cmd_sn;
__le32 data[3];
};
struct iscsi_conn_offload_params {
@ -246,8 +234,10 @@ struct iscsi_conn_offload_params {
#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x3F
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 2
#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
u8 pbl_page_size_log;
u8 pbe_page_size_log;
u8 default_cq;
@ -278,8 +268,12 @@ struct iscsi_conn_update_ramrod_params {
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0xF
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 4
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6
u8 reserved0[3];
__le32 max_seq_size;
__le32 max_send_pdu_length;
@ -312,7 +306,7 @@ struct iscsi_ext_cdb_cmd_hdr {
__le32 expected_transfer_length;
__le32 cmd_sn;
__le32 exp_stat_sn;
struct iscsi_sge cdb_sge;
struct scsi_sge cdb_sge;
};
struct iscsi_login_req_hdr {
@ -519,8 +513,8 @@ struct iscsi_logout_response_hdr {
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le32 reserved4;
__le16 time2retain;
__le16 time2wait;
__le16 time_2_retain;
__le16 time_2_wait;
__le32 reserved5[1];
};
@ -602,7 +596,7 @@ struct iscsi_tmf_response_hdr {
#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
__le32 itt;
__le32 rtt;
__le32 reserved1;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
@ -641,7 +635,7 @@ struct iscsi_reject_hdr {
#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
__le32 reserved1;
__le32 all_ones;
__le32 reserved2;
__le32 stat_sn;
__le32 exp_cmd_sn;
@ -688,7 +682,9 @@ struct iscsi_cqe_solicited {
__le16 itid;
u8 task_type;
u8 fw_dbg_field;
__le32 reserved1[2];
u8 caused_conn_err;
u8 reserved0[3];
__le32 reserved1[1];
union iscsi_task_hdr iscsi_hdr;
};
@ -727,35 +723,6 @@ enum iscsi_cqe_unsolicited_type {
MAX_ISCSI_CQE_UNSOLICITED_TYPE
};
struct iscsi_virt_sgl_ctx {
struct regpair sgl_base;
struct regpair dsgl_base;
__le32 sgl_initial_offset;
__le32 dsgl_initial_offset;
__le32 dsgl_curr_offset[2];
};
struct iscsi_sgl_var_params {
u8 sgl_ptr;
u8 dsgl_ptr;
__le16 sge_offset;
__le16 dsge_offset;
};
struct iscsi_phys_sgl_ctx {
struct regpair sgl_base;
struct regpair dsgl_base;
u8 sgl_size;
u8 dsgl_size;
__le16 reserved;
struct iscsi_sgl_var_params var_params[2];
};
union iscsi_data_desc_ctx {
struct iscsi_virt_sgl_ctx virt_sgl;
struct iscsi_phys_sgl_ctx phys_sgl;
struct iscsi_cached_sge_ctx cached_sge;
};
struct iscsi_debug_modes {
u8 flags;
@ -771,8 +738,10 @@ struct iscsi_debug_modes {
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
#define ISCSI_DEBUG_MODES_RESERVED0_MASK 0x3
#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT 6
#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7
};
struct iscsi_dif_flags {
@ -806,7 +775,6 @@ enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES,
MAX_ISCSI_EQE_OPCODE
};
@ -856,31 +824,11 @@ enum iscsi_error_types {
ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
ISCSI_CONN_ERROR_INVALID_ITT,
ISCSI_ERROR_UNKNOWN,
MAX_ISCSI_ERROR_TYPES
};
struct iscsi_mflags {
u8 mflags;
#define ISCSI_MFLAGS_SLOW_IO_MASK 0x1
#define ISCSI_MFLAGS_SLOW_IO_SHIFT 0
#define ISCSI_MFLAGS_SINGLE_SGE_MASK 0x1
#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1
#define ISCSI_MFLAGS_RESERVED_MASK 0x3F
#define ISCSI_MFLAGS_RESERVED_SHIFT 2
};
struct iscsi_sgl {
struct regpair sgl_addr;
__le16 updated_sge_size;
__le16 updated_sge_offset;
__le32 byte_offset;
};
union iscsi_mstorm_sgl {
struct iscsi_sgl sgl_struct;
struct iscsi_sge single_sge;
};
enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UNUSED = 0,
@ -896,10 +844,10 @@ enum iscsi_ramrod_cmd_id {
struct iscsi_reg1 {
__le32 reg1_map;
#define ISCSI_REG1_NUM_FAST_SGES_MASK 0x7
#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0
#define ISCSI_REG1_RESERVED1_MASK 0x1FFFFFFF
#define ISCSI_REG1_RESERVED1_SHIFT 3
#define ISCSI_REG1_NUM_SGES_MASK 0xF
#define ISCSI_REG1_NUM_SGES_SHIFT 0
#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
#define ISCSI_REG1_RESERVED1_SHIFT 4
};
union iscsi_seq_num {
@ -967,22 +915,33 @@ struct iscsi_spe_func_init {
};
struct ystorm_iscsi_task_state {
union iscsi_data_desc_ctx sgl_ctx_union;
__le32 buffer_offset[2];
__le16 bytes_nxt_dif;
__le16 rxmit_bytes_nxt_dif;
union iscsi_seq_num seq_num_union;
u8 dif_bytes_leftover;
u8 rxmit_dif_bytes_leftover;
__le16 reuse_count;
struct iscsi_dif_flags dif_flags;
u8 local_comp;
struct scsi_cached_sges data_desc;
struct scsi_sgl_params sgl_params;
__le32 exp_r2t_sn;
__le32 sgl_offset[2];
__le32 buffer_offset;
union iscsi_seq_num seq_num;
struct iscsi_dif_flags dif_flags;
u8 flags;
#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2
};
struct ystorm_iscsi_task_rxmit_opt {
__le32 fast_rxmit_sge_offset;
__le32 scan_start_buffer_offset;
__le32 fast_rxmit_buffer_offset;
u8 scan_start_sgl_index;
u8 fast_rxmit_sgl_index;
__le16 reserved;
};
struct ystorm_iscsi_task_st_ctx {
struct ystorm_iscsi_task_state state;
struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
union iscsi_task_hdr pdu_hdr;
};
@ -1152,25 +1111,16 @@ struct ustorm_iscsi_task_ag_ctx {
};
struct mstorm_iscsi_task_st_ctx {
union iscsi_mstorm_sgl sgl_union;
struct iscsi_dif_flags dif_flags;
struct iscsi_mflags flags;
u8 sgl_size;
u8 host_sge_index;
__le16 dix_cur_sge_offset;
__le16 dix_cur_sge_size;
__le32 data_offset_rtid;
u8 dif_offset;
u8 dix_sgl_size;
u8 dix_sge_index;
u8 task_type;
struct regpair sense_db;
struct regpair dix_sgl_cur_sge;
struct scsi_cached_sges data_desc;
struct scsi_sgl_params sgl_params;
__le32 rem_task_size;
__le16 reuse_count;
__le16 dif_data_residue;
u8 reserved0[4];
__le32 reserved1[1];
__le32 data_buffer_offset;
u8 task_type;
struct iscsi_dif_flags dif_flags;
u8 reserved0[2];
struct regpair sense_db;
__le32 expected_itt;
__le32 reserved1;
};
struct ustorm_iscsi_task_st_ctx {
@ -1184,7 +1134,7 @@ struct ustorm_iscsi_task_st_ctx {
#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
u8 reserved2;
struct iscsi_dif_flags dif_flags;
__le16 reserved3;
__le32 reserved4;
__le32 reserved5;
@ -1207,10 +1157,10 @@ struct ustorm_iscsi_task_st_ctx {
#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT 4
#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT 5
#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
@ -1220,7 +1170,6 @@ struct ustorm_iscsi_task_st_ctx {
struct iscsi_task_context {
struct ystorm_iscsi_task_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
struct regpair ystorm_ag_padding[2];
struct tdif_task_context tdif_context;
@ -1272,32 +1221,22 @@ struct iscsi_uhqe {
#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
};
struct iscsi_wqe_field {
__le32 contlen_cdbsize_field;
#define ISCSI_WQE_FIELD_CONT_LEN_MASK 0xFFFFFF
#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0
#define ISCSI_WQE_FIELD_CDB_SIZE_MASK 0xFF
#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24
};
union iscsi_wqe_field_union {
struct iscsi_wqe_field cont_field;
__le32 prev_tid;
};
struct iscsi_wqe {
__le16 task_id;
u8 flags;
#define ISCSI_WQE_WQE_TYPE_MASK 0x7
#define ISCSI_WQE_WQE_TYPE_SHIFT 0
#define ISCSI_WQE_NUM_FAST_SGES_MASK 0x7
#define ISCSI_WQE_NUM_FAST_SGES_SHIFT 3
#define ISCSI_WQE_PTU_INVALIDATE_MASK 0x1
#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6
#define ISCSI_WQE_NUM_SGES_MASK 0xF
#define ISCSI_WQE_NUM_SGES_SHIFT 3
#define ISCSI_WQE_RESPONSE_MASK 0x1
#define ISCSI_WQE_RESPONSE_SHIFT 7
struct iscsi_dif_flags prot_flags;
union iscsi_wqe_field_union cont_prevtid_union;
__le32 contlen_cdbsize;
#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF
#define ISCSI_WQE_CONT_LEN_SHIFT 0
#define ISCSI_WQE_CDB_SIZE_MASK 0xFF
#define ISCSI_WQE_CDB_SIZE_SHIFT 24
};
enum iscsi_wqe_type {
@ -1318,17 +1257,15 @@ struct iscsi_xhqe {
u8 total_ahs_length;
u8 opcode;
u8 flags;
#define ISCSI_XHQE_NUM_FAST_SGES_MASK 0x7
#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0
#define ISCSI_XHQE_FINAL_MASK 0x1
#define ISCSI_XHQE_FINAL_SHIFT 3
#define ISCSI_XHQE_SUPER_IO_MASK 0x1
#define ISCSI_XHQE_SUPER_IO_SHIFT 4
#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
#define ISCSI_XHQE_STATUS_BIT_SHIFT 5
#define ISCSI_XHQE_RESERVED_MASK 0x3
#define ISCSI_XHQE_RESERVED_SHIFT 6
union iscsi_seq_num seq_num_union;
#define ISCSI_XHQE_FINAL_MASK 0x1
#define ISCSI_XHQE_FINAL_SHIFT 0
#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
#define ISCSI_XHQE_NUM_SGES_MASK 0xF
#define ISCSI_XHQE_NUM_SGES_SHIFT 2
#define ISCSI_XHQE_RESERVED0_MASK 0x3
#define ISCSI_XHQE_RESERVED0_SHIFT 6
union iscsi_seq_num seq_num;
__le16 reserved1;
};

View File

@ -38,4 +38,21 @@
#define ROCE_MAX_QPS (32 * 1024)
enum roce_async_events_type {
ROCE_ASYNC_EVENT_NONE = 0,
ROCE_ASYNC_EVENT_COMM_EST = 1,
ROCE_ASYNC_EVENT_SQ_DRAINED,
ROCE_ASYNC_EVENT_SRQ_LIMIT,
ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
ROCE_ASYNC_EVENT_CQ_ERR,
ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
ROCE_ASYNC_EVENT_SRQ_EMPTY,
ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
MAX_ROCE_ASYNC_EVENTS_TYPE
};
#endif /* __ROCE_COMMON__ */

View File

@ -40,6 +40,8 @@
#define BDQ_ID_IMM_DATA (1)
#define BDQ_NUM_IDS (2)
#define SCSI_NUM_SGES_SLOW_SGL_THR 8
#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
struct scsi_bd {
@ -52,6 +54,16 @@ struct scsi_bdq_ram_drv_data {
__le16 reserved0[3];
};
struct scsi_sge {
struct regpair sge_addr;
__le32 sge_len;
__le32 reserved;
};
struct scsi_cached_sges {
struct scsi_sge sge[4];
};
struct scsi_drv_cmdq {
__le16 cmdq_cons;
__le16 reserved0;
@ -99,11 +111,19 @@ struct scsi_ram_per_bdq_resource_drv_data {
struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
};
struct scsi_sge {
struct regpair sge_addr;
__le16 sge_len;
__le16 reserved0;
__le32 reserved1;
enum scsi_sgl_mode {
SCSI_TX_SLOW_SGL,
SCSI_FAST_SGL,
MAX_SCSI_SGL_MODE
};
struct scsi_sgl_params {
struct regpair sgl_addr;
__le32 sgl_total_length;
__le32 sge_offset;
__le16 sgl_num_sges;
u8 sgl_index;
u8 reserved;
};
struct scsi_terminate_extra_params {

View File

@ -173,6 +173,7 @@ enum tcp_seg_placement_event {
TCP_EVENT_ADD_ISLE_RIGHT,
TCP_EVENT_ADD_ISLE_LEFT,
TCP_EVENT_JOIN,
TCP_EVENT_DELETE_ISLES,
TCP_EVENT_NOP,
MAX_TCP_SEG_PLACEMENT_EVENT
};