IB/ipath: Use PIO buffer for RC ACKs
This reduces the latency for RC ACKs when a PIO buffer is available.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
commit d98b193776
parent c4b4d16e09
@@ -31,6 +31,8 @@
  * SOFTWARE.
  */
 
+#include <linux/io.h>
+
 #include "ipath_verbs.h"
 #include "ipath_kernel.h"
 
@@ -585,19 +587,39 @@ bail:
 static void send_rc_ack(struct ipath_qp *qp)
 {
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+	struct ipath_devdata *dd;
 	u16 lrh0;
 	u32 bth0;
 	u32 hwords;
+	u32 __iomem *piobuf;
 	struct ipath_ib_header hdr;
 	struct ipath_other_headers *ohdr;
 	unsigned long flags;
 
+	spin_lock_irqsave(&qp->s_lock, flags);
+
 	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
 	if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
 	    (qp->s_flags & IPATH_S_ACK_PENDING) ||
 	    qp->s_ack_state != OP(ACKNOWLEDGE))
 		goto queue_ack;
 
+	spin_unlock_irqrestore(&qp->s_lock, flags);
+
+	dd = dev->dd;
+	piobuf = ipath_getpiobuf(dd, 0, NULL);
+	if (!piobuf) {
+		/*
+		 * We are out of PIO buffers at the moment.
+		 * Pass responsibility for sending the ACK to the
+		 * send tasklet so that when a PIO buffer becomes
+		 * available, the ACK is sent ahead of other outgoing
+		 * packets.
+		 */
+		spin_lock_irqsave(&qp->s_lock, flags);
+		goto queue_ack;
+	}
+
 	/* Construct the header. */
 	ohdr = &hdr.u.oth;
 	lrh0 = IPATH_LRH_BTH;
@@ -611,7 +633,7 @@ static void send_rc_ack(struct ipath_qp *qp)
 		lrh0 = IPATH_LRH_GRH;
 	}
 	/* read pkey_index w/o lock (its atomic) */
-	bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) |
+	bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
 		(OP(ACKNOWLEDGE) << 24) | (1 << 22);
 	if (qp->r_nak_state)
 		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
@@ -623,30 +645,29 @@ static void send_rc_ack(struct ipath_qp *qp)
 	hdr.lrh[0] = cpu_to_be16(lrh0);
 	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
 	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
-	hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
+	hdr.lrh[3] = cpu_to_be16(dd->ipath_lid);
 	ohdr->bth[0] = cpu_to_be32(bth0);
 	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
 	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
 
-	/*
-	 * If we can send the ACK, clear the ACK state.
-	 */
-	if (ipath_verbs_send(qp, &hdr, hwords, NULL, 0) == 0) {
-		dev->n_unicast_xmit++;
-		goto done;
-	}
-
-	/*
-	 * We are out of PIO buffers at the moment.
-	 * Pass responsibility for sending the ACK to the
-	 * send tasklet so that when a PIO buffer becomes
-	 * available, the ACK is sent ahead of other outgoing
-	 * packets.
-	 */
-	dev->n_rc_qacks++;
+	writeq(hwords + 1, piobuf);
+
+	if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
+		u32 *hdrp = (u32 *) &hdr;
+
+		ipath_flush_wc();
+		__iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
+		ipath_flush_wc();
+		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
+	} else
+		__iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
+
+	ipath_flush_wc();
+
+	dev->n_unicast_xmit++;
+	goto done;
 
 queue_ack:
-	spin_lock_irqsave(&qp->s_lock, flags);
 	dev->n_rc_qacks++;
 	qp->s_flags |= IPATH_S_ACK_PENDING;
 	qp->s_nak_state = qp->r_nak_state;
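The heart of the change is the new fast path in send_rc_ack(): instead of handing the ACK to ipath_verbs_send(), the function claims a PIO send buffer directly, writes the length word and header into it, and falls back to the send tasklet only when no buffer is free. Below is a simplified, annotated sketch of that flow built from the hunks above; it is illustrative only, not a compilable excerpt — the driver helpers (ipath_getpiobuf(), ipath_flush_wc()) are taken as given from the diff, and build_ack_header() is a hypothetical stand-in for the header construction code.

/*
 * Sketch of the new send_rc_ack() fast path (simplified from the diff above).
 */
static void send_rc_ack_sketch(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_devdata *dd = dev->dd;
	struct ipath_ib_header hdr;
	u32 __iomem *piobuf;
	u32 hwords;
	unsigned long flags;

	/* Try to claim a PIO send buffer; this is the low-latency path. */
	piobuf = ipath_getpiobuf(dd, 0, NULL);
	if (!piobuf) {
		/* No buffer free: let the send tasklet emit the ACK later. */
		spin_lock_irqsave(&qp->s_lock, flags);
		dev->n_rc_qacks++;
		qp->s_flags |= IPATH_S_ACK_PENDING;
		qp->s_nak_state = qp->r_nak_state;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	/* Hypothetical helper standing in for the LRH/BTH/AETH setup above. */
	hwords = build_ack_header(qp, &hdr);

	/* The length/control word goes into the first qword of the PIO buffer. */
	writeq(hwords + 1, piobuf);

	if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
		u32 *hdrp = (u32 *) &hdr;

		/* Flush the write-combining buffer so the control word is
		 * posted before the header words... */
		ipath_flush_wc();
		__iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
		/* ...and again so the last header word is the final store
		 * into the buffer. */
		ipath_flush_wc();
		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
	} else
		__iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);

	ipath_flush_wc();
	dev->n_unicast_xmit++;
}

Compared with the old code, which always went through ipath_verbs_send() and queued the ACK for the send tasklet on failure, the common case now copies the ACK header straight into chip PIO space with no extra queueing, which is where the latency reduction described in the commit message comes from.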