Merge branch 'cxgb4'
Karen Xie says:

====================
cxgb4/cxgbi: misc. fixes for cxgb4i

This patch set fixes cxgb4i's tx credit calculation and adds handling of
additional rx message and negative advice types. It also removes the
duplicate code in cxgb4i that sets the outgoing queues of a packet.

Karen Xie (7):
  cxgb4i: fix tx immediate data credit check
  cxgb4i: fix credit check for tx_data_wr
  cxgb4/cxgb4i: set max. outgoing pdu length in the f/w
  cxgb4i: add more types of negative advice
  cxgb4i: handle non pdu-aligned rx data
  cxgb4i: use cxgb4's set_wr_txq() for setting outgoing queues
  libcxgbi: fix the debug print accessing skb after it is freed

Sending to net as the fixes are mostly in the network area and the series
touches cxgb4's header file (t4fw_api.h).

v2 corrects the "CHECK"s flagged by checkpatch.pl --strict.
v3 splits the 3rd patch from v2 into two separate patches, adds detailed
   commit messages, and makes the subjects more concise. Patch 3/6 also
   changes the return value of is_neg_adv() from int to bool.
v4 -- please ignore.
v5 splits the 1st patch from v3 into two separate patches and reduces code
   duplication in make_tx_data_wr().
v6 removes the code style cleanup from the 2nd patch; the style update will
   be addressed in a separate patch.
v7 updates the 7th patch with a more detailed commit message.
v8 removes the duplicate subject lines from the message bodies.
v9 reformats the commit messages to at most 80 characters per line.
v10 rebases onto the net-next tree.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 3a923f5a43
@@ -560,6 +560,7 @@ enum fw_flowc_mnem {
 	FW_FLOWC_MNEM_RCVNXT,
 	FW_FLOWC_MNEM_SNDBUF,
 	FW_FLOWC_MNEM_MSS,
+	FW_FLOWC_MNEM_TXDATAPLEN_MAX,
 };
 
 struct fw_flowc_mnemval {
@@ -75,6 +75,7 @@ typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);
 static void *t4_uld_add(const struct cxgb4_lld_info *);
 static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
 static int t4_uld_state_change(void *, enum cxgb4_state state);
+static inline int send_tx_flowc_wr(struct cxgbi_sock *);
 
 static const struct cxgb4_uld_info cxgb4i_uld_info = {
 	.name = DRV_MODULE_NAME,
@@ -157,12 +158,6 @@ static struct scsi_transport_template *cxgb4i_stt;
 #define RCV_BUFSIZ_MASK 0x3FFU
 #define MAX_IMM_TX_PKT_LEN 128
 
-static inline void set_queue(struct sk_buff *skb, unsigned int queue,
-			     const struct cxgbi_sock *csk)
-{
-	skb->queue_mapping = queue;
-}
-
 static int push_tx_frames(struct cxgbi_sock *, int);
 
 /*
@@ -172,10 +167,14 @@ static int push_tx_frames(struct cxgbi_sock *, int);
  * Returns true if a packet can be sent as an offload WR with immediate
  * data. We currently use the same limit as for Ethernet packets.
  */
-static inline int is_ofld_imm(const struct sk_buff *skb)
+static inline bool is_ofld_imm(const struct sk_buff *skb)
 {
-	return skb->len <= (MAX_IMM_TX_PKT_LEN -
-			sizeof(struct fw_ofld_tx_data_wr));
+	int len = skb->len;
+
+	if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
+		len += sizeof(struct fw_ofld_tx_data_wr);
+
+	return len <= MAX_IMM_TX_PKT_LEN;
 }
 
 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
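For reference, a standalone sketch (not the driver's code) of the reworked immediate-data test: the work-request header is only counted against the 128-byte immediate budget when the skb still needs that header (SKCBF_TX_NEED_HDR). The 16-byte header size below is an assumed stand-in for sizeof(struct fw_ofld_tx_data_wr).

/* Sketch of the fixed immediate-data check; header size and the
 * needs_hdr flag are illustrative assumptions, not driver definitions. */
#include <stdbool.h>
#include <stddef.h>

#define MAX_IMM_TX_PKT_LEN  128   /* immediate-data budget, from the patch */
#define OFLD_TX_DATA_WR_LEN 16    /* assumed sizeof(struct fw_ofld_tx_data_wr) */

static bool is_ofld_imm_sketch(size_t skb_len, bool needs_hdr)
{
	size_t len = skb_len;

	/* Only charge the WR header if this skb still has to carry one. */
	if (needs_hdr)
		len += OFLD_TX_DATA_WR_LEN;

	return len <= MAX_IMM_TX_PKT_LEN;
}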
@@ -388,13 +387,19 @@ static void send_abort_req(struct cxgbi_sock *csk)
 
 	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
 		return;
+
+	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+		send_tx_flowc_wr(csk);
+		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+	}
+
 	cxgbi_sock_set_state(csk, CTP_ABORTING);
 	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
 	cxgbi_sock_purge_write_queue(csk);
 
 	csk->cpl_abort_req = NULL;
 	req = (struct cpl_abort_req *)skb->head;
-	set_queue(skb, CPL_PRIORITY_DATA, csk);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
 	req->cmd = CPL_ABORT_SEND_RST;
 	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
 	INIT_TP_WR(req, csk->tid);
@@ -420,7 +425,7 @@ static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
 		csk, csk->state, csk->flags, csk->tid, rst_status);
 
 	csk->cpl_abort_rpl = NULL;
-	set_queue(skb, CPL_PRIORITY_DATA, csk);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
 	INIT_TP_WR(rpl, csk->tid);
 	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
 	rpl->cmd = rst_status;
@@ -491,20 +496,40 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
 	return flits + sgl_len(cnt);
 }
 
-static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
+#define FLOWC_WR_NPARAMS_MIN	9
+static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
+{
+	int nparams, flowclen16, flowclen;
+
+	nparams = FLOWC_WR_NPARAMS_MIN;
+	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+	flowclen16 = DIV_ROUND_UP(flowclen, 16);
+	flowclen = flowclen16 * 16;
+	/*
+	 * Return the number of 16-byte credits used by the FlowC request.
+	 * Pass back the nparams and actual FlowC length if requested.
+	 */
+	if (nparamsp)
+		*nparamsp = nparams;
+	if (flowclenp)
+		*flowclenp = flowclen;
+
+	return flowclen16;
+}
+
+static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
 {
 	struct sk_buff *skb;
 	struct fw_flowc_wr *flowc;
-	int flowclen, i;
+	int nparams, flowclen16, flowclen;
 
-	flowclen = 80;
+	flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
 	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
 	flowc = (struct fw_flowc_wr *)skb->head;
 	flowc->op_to_nparams =
-		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(8));
+		htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
 	flowc->flowid_len16 =
-		htonl(FW_WR_LEN16_V(DIV_ROUND_UP(72, 16)) |
-				FW_WR_FLOWID_V(csk->tid));
+		htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
 	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
 	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
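To see how the new helper sizes the FlowC work request, here is a minimal worked example of the 16-byte credit arithmetic. The 8-byte header and 8-byte mnemval entry sizes are assumptions for illustration; the real layouts come from the firmware structures in t4fw_api.h.

/* Worked example of the FlowC credit calculation under assumed sizes. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int hdr = 8;      /* assumed fixed part of struct fw_flowc_wr */
	int mnemval = 8;  /* assumed sizeof(struct fw_flowc_mnemval) */
	int nparams = 9;  /* FLOWC_WR_NPARAMS_MIN from the patch */

	int flowclen = hdr + nparams * mnemval;       /* offsetof(..., mnemval[nparams]) */
	int flowclen16 = DIV_ROUND_UP(flowclen, 16);  /* credits are 16-byte units */

	/* e.g. 8 + 9*8 = 80 bytes -> 5 credits -> 80 bytes after rounding */
	printf("flowclen=%d flowclen16=%d rounded=%d\n",
	       flowclen, flowclen16, flowclen16 * 16);
	return 0;
}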
@@ -523,12 +548,10 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
 	flowc->mnemval[7].val = htonl(csk->advmss);
 	flowc->mnemval[8].mnemonic = 0;
 	flowc->mnemval[8].val = 0;
-	for (i = 0; i < 9; i++) {
-		flowc->mnemval[i].r4[0] = 0;
-		flowc->mnemval[i].r4[1] = 0;
-		flowc->mnemval[i].r4[2] = 0;
-	}
-	set_queue(skb, CPL_PRIORITY_DATA, csk);
+	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+	flowc->mnemval[8].val = 16384;
+
+	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
@@ -537,6 +560,8 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
 		csk->advmss);
 
 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
+
+	return flowclen16;
 }
 
 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
@@ -545,10 +570,11 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
 	struct fw_ofld_tx_data_wr *req;
 	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
 	unsigned int wr_ulp_mode = 0, val;
+	bool imm = is_ofld_imm(skb);
 
 	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
 
-	if (is_ofld_imm(skb)) {
+	if (imm) {
 		req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
 					FW_WR_COMPL_F |
 					FW_WR_IMMDLEN_V(dlen));
@@ -597,16 +623,32 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
 		int dlen = skb->len;
 		int len = skb->len;
 		unsigned int credits_needed;
+		int flowclen16 = 0;
 
 		skb_reset_transport_header(skb);
 		if (is_ofld_imm(skb))
-			credits_needed = DIV_ROUND_UP(dlen +
-					sizeof(struct fw_ofld_tx_data_wr), 16);
+			credits_needed = DIV_ROUND_UP(dlen, 16);
 		else
-			credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb)
-					+ sizeof(struct fw_ofld_tx_data_wr),
+			credits_needed = DIV_ROUND_UP(
+						8 * calc_tx_flits_ofld(skb),
 						16);
+
+		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
+			credits_needed += DIV_ROUND_UP(
+					sizeof(struct fw_ofld_tx_data_wr),
+					16);
+
+		/*
+		 * Assumes the initial credits is large enough to support
+		 * fw_flowc_wr plus largest possible first payload
+		 */
+		if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+			flowclen16 = send_tx_flowc_wr(csk);
+			csk->wr_cred -= flowclen16;
+			csk->wr_una_cred += flowclen16;
+			cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
+		}
+
 		if (csk->wr_cred < credits_needed) {
 			log_debug(1 << CXGBI_DBG_PDU_TX,
 				"csk 0x%p, skb %u/%u, wr %d < %u.\n",
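As a rough sketch of the corrected per-skb accounting in push_tx_frames(): payload credits are computed from the data (or SGL flits) alone, and the work-request header is charged once, only for skbs that still need one. The flit count input and the 16-byte header size below are illustrative assumptions, not the driver's definitions.

/* Sketch of per-skb credit accounting after the fix. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define OFLD_TX_DATA_WR_LEN 16  /* assumed sizeof(struct fw_ofld_tx_data_wr) */

static unsigned int credits_for_skb(unsigned int dlen, unsigned int nflits,
				    int is_imm, int needs_hdr)
{
	unsigned int credits;

	if (is_imm)
		credits = DIV_ROUND_UP(dlen, 16);        /* payload only */
	else
		credits = DIV_ROUND_UP(8 * nflits, 16);  /* SGL flits, 8 bytes each */

	if (needs_hdr)                                   /* header charged separately */
		credits += DIV_ROUND_UP(OFLD_TX_DATA_WR_LEN, 16);

	return credits;
}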
@@ -615,8 +657,8 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
 			break;
 		}
 		__skb_unlink(skb, &csk->write_queue);
-		set_queue(skb, CPL_PRIORITY_DATA, csk);
-		skb->csum = credits_needed;
+		set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
+		skb->csum = credits_needed + flowclen16;
 		csk->wr_cred -= credits_needed;
 		csk->wr_una_cred += credits_needed;
 		cxgbi_sock_enqueue_wr(csk, skb);
@@ -627,12 +669,6 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
 			csk->wr_cred, csk->wr_una_cred);
 
 		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
-			if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
-				send_tx_flowc_wr(csk);
-				skb->csum += 5;
-				csk->wr_cred -= 5;
-				csk->wr_una_cred += 5;
-			}
 			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
 			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
 					req_completion);
@@ -807,6 +843,13 @@ static void csk_act_open_retry_timer(unsigned long data)
 
 }
 
+static inline bool is_neg_adv(unsigned int status)
+{
+	return status == CPL_ERR_RTX_NEG_ADVICE ||
+		status == CPL_ERR_KEEPALV_NEG_ADVICE ||
+		status == CPL_ERR_PERSIST_NEG_ADVICE;
+}
+
 static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
 {
 	struct cxgbi_sock *csk;
@@ -828,7 +871,7 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
 		"csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
 		atid, tid, status, csk, csk->state, csk->flags);
 
-	if (status == CPL_ERR_RTX_NEG_ADVICE)
+	if (is_neg_adv(status))
 		goto rel_skb;
 
 	module_put(THIS_MODULE);
@@ -934,8 +977,7 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
 		(&csk->saddr), (&csk->daddr),
 		csk, csk->state, csk->flags, csk->tid, req->status);
 
-	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
-	    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
+	if (is_neg_adv(req->status))
 		goto rel_skb;
 
 	cxgbi_sock_get(csk);
@@ -989,6 +1031,27 @@ rel_skb:
 	__kfree_skb(skb);
 }
 
+static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+	struct cxgbi_sock *csk;
+	struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
+	unsigned int tid = GET_TID(cpl);
+	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+	struct tid_info *t = lldi->tids;
+
+	csk = lookup_tid(t, tid);
+	if (!csk) {
+		pr_err("can't find connection for tid %u.\n", tid);
+	} else {
+		/* not expecting this, reset the connection. */
+		pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
+		spin_lock_bh(&csk->lock);
+		send_abort_req(csk);
+		spin_unlock_bh(&csk->lock);
+	}
+	__kfree_skb(skb);
+}
+
 static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
 {
 	struct cxgbi_sock *csk;
@@ -1408,6 +1471,7 @@ cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
 	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
 	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
 	[CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
+	[CPL_RX_DATA] = do_rx_data,
 };
 
 int cxgb4i_ofld_init(struct cxgbi_device *cdev)
@@ -1485,7 +1549,7 @@ static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
 		return -ENOMEM;
 	}
 	req = (struct ulp_mem_io *)skb->head;
-	set_queue(skb, CPL_PRIORITY_CONTROL, NULL);
+	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
 	ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
 	idata = (struct ulptx_idata *)(req + 1);
@@ -2294,10 +2294,12 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
 		return err;
 	}
 
-	kfree_skb(skb);
 	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
 		"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
 		task->itt, skb, skb->len, skb->data_len, err);
+
+	kfree_skb(skb);
+
 	iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
 	iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
 	return err;
@@ -317,8 +317,8 @@ static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb,
 	__clear_bit(flag, &(cxgbi_skcb_flags(skb)));
 }
 
-static inline int cxgbi_skcb_test_flag(struct sk_buff *skb,
-				enum cxgbi_skcb_flags flag)
+static inline int cxgbi_skcb_test_flag(const struct sk_buff *skb,
+				       enum cxgbi_skcb_flags flag)
 {
 	return test_bit(flag, &(cxgbi_skcb_flags(skb)));
 }