mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-04 01:24:12 +08:00
net: allow alloc_skb_with_frags() to allocate bigger packets
Refactor alloc_skb_with_frags() to allow bigger packets allocations. Instead of assuming that only order-0 allocations will be attempted, use the caller supplied max order.

v2: try harder to use high-order pages, per Willem feedback.

Link: https://lore.kernel.org/netdev/CANn89iJQfmc_KeUr3TeXvsLQwo3ZymyoCr7Y6AnHrkWSuz0yAg@mail.gmail.com/
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tahsin Erdogan <trdgn@amazon.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://lore.kernel.org/r/20230801205254.400094-2-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
49c467dca3
commit
09c2c90705
@@ -6204,7 +6204,7 @@ EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
|
||||
*
|
||||
* @header_len: size of linear part
|
||||
* @data_len: needed length in frags
|
||||
* @max_page_order: max page order desired.
|
||||
* @order: max page order desired.
|
||||
* @errcode: pointer to error code if any
|
||||
* @gfp_mask: allocation mask
|
||||
*
|
||||
@@ -6212,21 +6212,17 @@ EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
|
||||
*/
|
||||
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
|
||||
unsigned long data_len,
|
||||
int max_page_order,
|
||||
int order,
|
||||
int *errcode,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
|
||||
unsigned long chunk;
|
||||
struct sk_buff *skb;
|
||||
struct page *page;
|
||||
int i;
|
||||
int nr_frags = 0;
|
||||
|
||||
*errcode = -EMSGSIZE;
|
||||
/* Note this test could be relaxed, if we succeed to allocate
|
||||
* high order pages...
|
||||
*/
|
||||
if (npages > MAX_SKB_FRAGS)
|
||||
if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order)))
|
||||
return NULL;
|
||||
|
||||
*errcode = -ENOBUFS;
|
||||
@@ -6234,34 +6230,32 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
|
||||
if (!skb)
|
||||
return NULL;
|
||||
|
||||
skb->truesize += npages << PAGE_SHIFT;
|
||||
|
||||
for (i = 0; npages > 0; i++) {
|
||||
int order = max_page_order;
|
||||
|
||||
while (order) {
|
||||
if (npages >= 1 << order) {
|
||||
page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
|
||||
__GFP_COMP |
|
||||
__GFP_NOWARN,
|
||||
order);
|
||||
if (page)
|
||||
goto fill_page;
|
||||
/* Do not retry other high order allocations */
|
||||
order = 1;
|
||||
max_page_order = 0;
|
||||
}
|
||||
order--;
|
||||
}
|
||||
page = alloc_page(gfp_mask);
|
||||
if (!page)
|
||||
while (data_len) {
|
||||
if (nr_frags == MAX_SKB_FRAGS - 1)
|
||||
goto failure;
|
||||
fill_page:
|
||||
while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order))
|
||||
order--;
|
||||
|
||||
if (order) {
|
||||
page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
|
||||
__GFP_COMP |
|
||||
__GFP_NOWARN,
|
||||
order);
|
||||
if (!page) {
|
||||
order--;
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
page = alloc_page(gfp_mask);
|
||||
if (!page)
|
||||
goto failure;
|
||||
}
|
||||
chunk = min_t(unsigned long, data_len,
|
||||
PAGE_SIZE << order);
|
||||
skb_fill_page_desc(skb, i, page, 0, chunk);
|
||||
skb_fill_page_desc(skb, nr_frags, page, 0, chunk);
|
||||
nr_frags++;
|
||||
skb->truesize += (PAGE_SIZE << order);
|
||||
data_len -= chunk;
|
||||
npages -= 1 << order;
|
||||
}
|
||||
return skb;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user