xprtrdma: Boost maximum transport header size
Although I haven't seen any performance results that justify it,
I've received several complaints that NFS/RDMA no longer supports
a maximum rsize and wsize of 1MB. These days it is somewhat smaller.
To simplify the logic that determines whether a chunk list is
necessary, the implementation uses a fixed maximum size of the
transport header. Currently that maximum size is 256 bytes, one
quarter of the default inline threshold size for RPC/RDMA v1.
Since commit a78868497c ("xprtrdma: Reduce max_frwr_depth"), the
size of chunks is also smaller to take advantage of inline page
lists in device internal MR data structures.
The combination of these two design choices has reduced the maximum
NFS rsize and wsize that can be used for most RNIC/HCAs. Increasing
the maximum transport header size and the maximum number of RDMA
segments it can contain increases the negotiated maximum rsize/wsize
on common RNIC/HCAs.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 36bdd9056b
commit f3c66a2f56
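For a rough sense of the rsize/wsize arithmetic described in the
message above: the maximum payload a transport can negotiate is
approximately the number of RDMA segments a header can carry, times
the number of pages each MR can register, times the page size. The
sketch below is illustrative only; the 16-page FRWR depth and the
4 KB page size are assumed example values, not figures taken from
this commit.

/* Illustrative estimate only, not kernel code. */
#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;	/* assumed 4 KB pages */
	const unsigned long frwr_depth = 16;	/* assumed pages per MR */

	/* Old cap of 8 header segments: 8 * 16 * 4 KB = 512 KB */
	printf("8 segments:  %lu KB\n", 8 * frwr_depth * page_size / 1024);

	/* New cap of 16 header segments: 16 * 16 * 4 KB = 1 MB */
	printf("16 segments: %lu KB\n", 16 * frwr_depth * page_size / 1024);
	return 0;
}

In this example, only the larger segment cap brings the negotiated
maximum back up to 1 MB once the per-MR depth has been reduced.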
@@ -53,6 +53,7 @@
 #include <linux/slab.h>
 #include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/svc_rdma.h>
+#include <linux/log2.h>
 
 #include <asm-generic/barrier.h>
 #include <asm/bitops.h>
@@ -1000,12 +1001,18 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
 	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
 	struct rpcrdma_regbuf *rb;
 	struct rpcrdma_req *req;
+	size_t maxhdrsize;
 
 	req = kzalloc(sizeof(*req), flags);
 	if (req == NULL)
 		goto out1;
 
-	rb = rpcrdma_regbuf_alloc(RPCRDMA_HDRBUF_SIZE, DMA_TO_DEVICE, flags);
+	/* Compute maximum header buffer size in bytes */
+	maxhdrsize = rpcrdma_fixed_maxsz + 3 +
+		     r_xprt->rx_ia.ri_max_segs * rpcrdma_readchunk_maxsz;
+	maxhdrsize *= sizeof(__be32);
+	rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
+				  DMA_TO_DEVICE, flags);
 	if (!rb)
 		goto out2;
 	req->rl_rdmabuf = rb;
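The hunk above replaces the fixed 256-byte RPCRDMA_HDRBUF_SIZE
allocation with a size computed from the device's segment count. The
stand-alone sketch below reproduces that arithmetic with assumed XDR
word counts (4 words of fixed header, 3 list discriminators, 6 words
per read chunk); the real constants are defined elsewhere in the
xprtrdma sources and may differ.

/* Stand-alone sketch of the header-buffer sizing, not kernel code.
 * The word counts are assumptions chosen for illustration.
 */
#include <stdio.h>

/* Local stand-in for the kernel's __roundup_pow_of_two() */
static unsigned long roundup_pow2(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	const unsigned int fixed_maxsz = 4;	/* assumed fixed header words */
	const unsigned int readchunk_maxsz = 6;	/* assumed words per read chunk */
	unsigned int segs;

	for (segs = 8; segs <= 16; segs += 8) {
		/* mirrors (fixed + 3 + segs * readchunk) * sizeof(__be32) */
		unsigned long maxhdrsize =
			(fixed_maxsz + 3 + segs * readchunk_maxsz) * 4;

		printf("%2u segments: %4lu bytes -> %lu-byte header buffer\n",
		       segs, maxhdrsize, roundup_pow2(maxhdrsize));
	}
	return 0;
}

Under these assumptions, 8 segments need 220 bytes and still fit in
the old 256-byte buffer, while 16 segments need 412 bytes and thus a
512-byte buffer, which is why the allocation can no longer use a
fixed RPCRDMA_HDRBUF_SIZE.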
@@ -155,25 +155,22 @@ static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
 
 /* To ensure a transport can always make forward progress,
  * the number of RDMA segments allowed in header chunk lists
- * is capped at 8. This prevents less-capable devices and
- * memory registrations from overrunning the Send buffer
- * while building chunk lists.
+ * is capped at 16. This prevents less-capable devices from
+ * overrunning the Send buffer while building chunk lists.
  *
  * Elements of the Read list take up more room than the
- * Write list or Reply chunk. 8 read segments means the Read
- * list (or Write list or Reply chunk) cannot consume more
- * than
+ * Write list or Reply chunk. 16 read segments means the
+ * chunk lists cannot consume more than
  *
- * ((8 + 2) * read segment size) + 1 XDR words, or 244 bytes.
+ * ((16 + 2) * read segment size) + 1 XDR words,
  *
- * And the fixed part of the header is another 24 bytes.
- *
- * The smallest inline threshold is 1024 bytes, ensuring that
- * at least 750 bytes are available for RPC messages.
+ * or about 400 bytes. The fixed part of the header is
+ * another 24 bytes. Thus when the inline threshold is
+ * 1024 bytes, at least 600 bytes are available for RPC
+ * message bodies.
  */
 enum {
-	RPCRDMA_MAX_HDR_SEGS = 8,
-	RPCRDMA_HDRBUF_SIZE = 256,
+	RPCRDMA_MAX_HDR_SEGS = 16,
 };
 
 /*
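The byte counts in the updated comment work out as follows, assuming
a read segment encodes as 6 XDR words (24 bytes), which is what the
old "244 bytes" figure for 8 segments implies:

    (16 + 2) * 24 bytes + 4 bytes = 436 bytes of chunk lists
    436 bytes + 24 bytes of fixed header ~ 460 bytes worst case

so of a 1024-byte inline threshold, roughly 560-600 bytes remain for
the RPC message body (the comment rounds the chunk lists down to
"about 400 bytes" to arrive at its 600-byte figure).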