skbuff: move __alloc_skb() next to the other skb allocation functions
In preparation for reusing several functions in all three skb allocation variants, move __alloc_skb() next to __netdev_alloc_skb() and __napi_alloc_skb(). No functional changes.

Signed-off-by: Alexander Lobakin <alobakin@pm.me>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 773dc50d71
commit 5381b23d5b
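For context, the "three skb allocation variants" the commit message refers to are __alloc_skb() (normally reached through the alloc_skb() wrapper), __netdev_alloc_skb() and __napi_alloc_skb(). A minimal sketch of how these entry points are typically reached from driver code follows; the allocator names are the real kernel APIs, while example_alloc_paths() and its arguments are purely illustrative:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only: the three allocation paths the commit message refers to. */
static void example_alloc_paths(struct net_device *dev,
				struct napi_struct *napi,
				unsigned int len)
{
	struct sk_buff *a, *b, *c;

	a = alloc_skb(len, GFP_KERNEL);	/* wraps __alloc_skb() */
	b = netdev_alloc_skb(dev, len);	/* __netdev_alloc_skb() */
	c = napi_alloc_skb(napi, len);	/* __napi_alloc_skb() */

	/* kfree_skb() is a no-op on NULL, so failed allocations are fine here. */
	kfree_skb(a);
	kfree_skb(b);
	kfree_skb(c);
}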
@@ -119,148 +119,6 @@ static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 	skb_panic(skb, sz, addr, __func__);
 }
 
-/*
- * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
- * the caller if emergency pfmemalloc reserves are being used. If it is and
- * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
- * may be used. Otherwise, the packet data may be discarded until enough
- * memory is free
- */
-#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
-	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
-
-static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
-			       unsigned long ip, bool *pfmemalloc)
-{
-	void *obj;
-	bool ret_pfmemalloc = false;
-
-	/*
-	 * Try a regular allocation, when that fails and we're not entitled
-	 * to the reserves, fail.
-	 */
-	obj = kmalloc_node_track_caller(size,
-					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
-					node);
-	if (obj || !(gfp_pfmemalloc_allowed(flags)))
-		goto out;
-
-	/* Try again but now we are using pfmemalloc reserves */
-	ret_pfmemalloc = true;
-	obj = kmalloc_node_track_caller(size, flags, node);
-
-out:
-	if (pfmemalloc)
-		*pfmemalloc = ret_pfmemalloc;
-
-	return obj;
-}
-
-/* Allocate a new skbuff. We do this ourselves so we can fill in a few
- * 'private' fields and also do memory statistics to find all the
- * [BEEP] leaks.
- *
- */
-
-/**
- *	__alloc_skb	-	allocate a network buffer
- *	@size: size to allocate
- *	@gfp_mask: allocation mask
- *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
- *		instead of head cache and allocate a cloned (child) skb.
- *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
- *		allocations in case the data is required for writeback
- *	@node: numa node to allocate memory on
- *
- *	Allocate a new &sk_buff. The returned buffer has no headroom and a
- *	tail room of at least size bytes. The object has a reference count
- *	of one. The return is the buffer. On a failure the return is %NULL.
- *
- *	Buffers may only be allocated from interrupts using a @gfp_mask of
- *	%GFP_ATOMIC.
- */
-struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
-			    int flags, int node)
-{
-	struct kmem_cache *cache;
-	struct skb_shared_info *shinfo;
-	struct sk_buff *skb;
-	u8 *data;
-	bool pfmemalloc;
-
-	cache = (flags & SKB_ALLOC_FCLONE)
-		? skbuff_fclone_cache : skbuff_head_cache;
-
-	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
-		gfp_mask |= __GFP_MEMALLOC;
-
-	/* Get the HEAD */
-	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
-	if (!skb)
-		goto out;
-	prefetchw(skb);
-
-	/* We do our best to align skb_shared_info on a separate cache
-	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
-	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
-	 * Both skb->head and skb_shared_info are cache line aligned.
-	 */
-	size = SKB_DATA_ALIGN(size);
-	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
-	if (!data)
-		goto nodata;
-	/* kmalloc(size) might give us more room than requested.
-	 * Put skb_shared_info exactly at the end of allocated zone,
-	 * to allow max possible filling before reallocation.
-	 */
-	size = SKB_WITH_OVERHEAD(ksize(data));
-	prefetchw(data + size);
-
-	/*
-	 * Only clear those fields we need to clear, not those that we will
-	 * actually initialise below. Hence, don't put any more fields after
-	 * the tail pointer in struct sk_buff!
-	 */
-	memset(skb, 0, offsetof(struct sk_buff, tail));
-	/* Account for allocated memory : skb + skb->head */
-	skb->truesize = SKB_TRUESIZE(size);
-	skb->pfmemalloc = pfmemalloc;
-	refcount_set(&skb->users, 1);
-	skb->head = data;
-	skb->data = data;
-	skb_reset_tail_pointer(skb);
-	skb->end = skb->tail + size;
-	skb->mac_header = (typeof(skb->mac_header))~0U;
-	skb->transport_header = (typeof(skb->transport_header))~0U;
-
-	/* make sure we initialize shinfo sequentially */
-	shinfo = skb_shinfo(skb);
-	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
-	atomic_set(&shinfo->dataref, 1);
-
-	if (flags & SKB_ALLOC_FCLONE) {
-		struct sk_buff_fclones *fclones;
-
-		fclones = container_of(skb, struct sk_buff_fclones, skb1);
-
-		skb->fclone = SKB_FCLONE_ORIG;
-		refcount_set(&fclones->fclone_ref, 1);
-
-		fclones->skb2.fclone = SKB_FCLONE_CLONE;
-	}
-
-	skb_set_kcov_handle(skb, kcov_common_handle());
-
-out:
-	return skb;
-nodata:
-	kmem_cache_free(cache, skb);
-	skb = NULL;
-	goto out;
-}
-EXPORT_SYMBOL(__alloc_skb);
-
 /* Caller must provide SKB that is memset cleared */
 static struct sk_buff *__build_skb_around(struct sk_buff *skb,
 					  void *data, unsigned int frag_size)
@@ -408,6 +266,148 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 }
 EXPORT_SYMBOL(__netdev_alloc_frag_align);
 
+/*
+ * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
+ * the caller if emergency pfmemalloc reserves are being used. If it is and
+ * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
+ * may be used. Otherwise, the packet data may be discarded until enough
+ * memory is free
+ */
+#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
+	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
+
+static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
+			       unsigned long ip, bool *pfmemalloc)
+{
+	void *obj;
+	bool ret_pfmemalloc = false;
+
+	/*
+	 * Try a regular allocation, when that fails and we're not entitled
+	 * to the reserves, fail.
+	 */
+	obj = kmalloc_node_track_caller(size,
+					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
+					node);
+	if (obj || !(gfp_pfmemalloc_allowed(flags)))
+		goto out;
+
+	/* Try again but now we are using pfmemalloc reserves */
+	ret_pfmemalloc = true;
+	obj = kmalloc_node_track_caller(size, flags, node);
+
+out:
+	if (pfmemalloc)
+		*pfmemalloc = ret_pfmemalloc;
+
+	return obj;
+}
+
+/* Allocate a new skbuff. We do this ourselves so we can fill in a few
+ * 'private' fields and also do memory statistics to find all the
+ * [BEEP] leaks.
+ *
+ */
+
+/**
+ *	__alloc_skb	-	allocate a network buffer
+ *	@size: size to allocate
+ *	@gfp_mask: allocation mask
+ *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
+ *		instead of head cache and allocate a cloned (child) skb.
+ *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
+ *		allocations in case the data is required for writeback
+ *	@node: numa node to allocate memory on
+ *
+ *	Allocate a new &sk_buff. The returned buffer has no headroom and a
+ *	tail room of at least size bytes. The object has a reference count
+ *	of one. The return is the buffer. On a failure the return is %NULL.
+ *
+ *	Buffers may only be allocated from interrupts using a @gfp_mask of
+ *	%GFP_ATOMIC.
+ */
+struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
+			    int flags, int node)
+{
+	struct kmem_cache *cache;
+	struct skb_shared_info *shinfo;
+	struct sk_buff *skb;
+	u8 *data;
+	bool pfmemalloc;
+
+	cache = (flags & SKB_ALLOC_FCLONE)
+		? skbuff_fclone_cache : skbuff_head_cache;
+
+	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
+		gfp_mask |= __GFP_MEMALLOC;
+
+	/* Get the HEAD */
+	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
+	if (!skb)
+		goto out;
+	prefetchw(skb);
+
+	/* We do our best to align skb_shared_info on a separate cache
+	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
+	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
+	 * Both skb->head and skb_shared_info are cache line aligned.
+	 */
+	size = SKB_DATA_ALIGN(size);
+	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
+	if (!data)
+		goto nodata;
+	/* kmalloc(size) might give us more room than requested.
+	 * Put skb_shared_info exactly at the end of allocated zone,
+	 * to allow max possible filling before reallocation.
+	 */
+	size = SKB_WITH_OVERHEAD(ksize(data));
+	prefetchw(data + size);
+
+	/*
+	 * Only clear those fields we need to clear, not those that we will
+	 * actually initialise below. Hence, don't put any more fields after
+	 * the tail pointer in struct sk_buff!
+	 */
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	/* Account for allocated memory : skb + skb->head */
+	skb->truesize = SKB_TRUESIZE(size);
+	skb->pfmemalloc = pfmemalloc;
+	refcount_set(&skb->users, 1);
+	skb->head = data;
+	skb->data = data;
+	skb_reset_tail_pointer(skb);
+	skb->end = skb->tail + size;
+	skb->mac_header = (typeof(skb->mac_header))~0U;
+	skb->transport_header = (typeof(skb->transport_header))~0U;
+
+	/* make sure we initialize shinfo sequentially */
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	if (flags & SKB_ALLOC_FCLONE) {
+		struct sk_buff_fclones *fclones;
+
+		fclones = container_of(skb, struct sk_buff_fclones, skb1);
+
+		skb->fclone = SKB_FCLONE_ORIG;
+		refcount_set(&fclones->fclone_ref, 1);
+
+		fclones->skb2.fclone = SKB_FCLONE_CLONE;
+	}
+
+	skb_set_kcov_handle(skb, kcov_common_handle());
+
+out:
+	return skb;
+nodata:
+	kmem_cache_free(cache, skb);
+	skb = NULL;
+	goto out;
+}
+EXPORT_SYMBOL(__alloc_skb);
+
 /**
  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
  *	@dev: network device to receive on
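As a reading aid, the kmalloc_reserve() helper that moves along with __alloc_skb() follows a simple "try without emergency reserves first, then retry and report" pattern. Below is a self-contained userspace sketch of that control flow only; try_alloc_noreserve(), may_use_reserve and the use of plain malloc() are illustrative stand-ins, not the kernel implementation:

#include <stdbool.h>
#include <stdlib.h>

/* Stand-in for the kmalloc_reserve() pattern shown in the diff above:
 * first attempt an allocation that must not dip into emergency reserves;
 * only if that fails and the caller is entitled to the reserves, retry
 * and report through *used_reserve that reserves were (notionally) used.
 */
static void *try_alloc_noreserve(size_t size, bool may_use_reserve,
				 bool *used_reserve)
{
	bool used = false;
	void *obj = malloc(size);	/* "normal" attempt, no reserves */

	if (!obj && may_use_reserve) {
		used = true;		/* second, "reserve-backed" attempt */
		obj = malloc(size);
	}
	if (used_reserve)
		*used_reserve = used;
	return obj;
}

In the kernel version above, the two attempts differ by the __GFP_NOMEMALLOC | __GFP_NOWARN flags on the first call, and __alloc_skb() records the result in skb->pfmemalloc.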