net: add __pskb_copy_fclone and pskb_copy_for_clone
There are several instances where a pskb_copy or __pskb_copy is
immediately followed by an skb_clone.

Add a couple of new functions to allow the copy skb to be allocated
from the fclone cache and thus speed up subsequent skb_clone calls.

Cc: Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
Cc: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
Cc: Marek Lindner <mareklindner@neomailbox.ch>
Cc: Simon Wunderlich <sw@simonwunderlich.de>
Cc: Antonio Quartulli <antonio@meshcoding.com>
Cc: Marcel Holtmann <marcel@holtmann.org>
Cc: Gustavo Padovan <gustavo@padovan.org>
Cc: Johan Hedberg <johan.hedberg@gmail.com>
Cc: Arvid Brodin <arvid.brodin@alten.se>
Cc: Patrick McHardy <kaber@trash.net>
Cc: Pablo Neira Ayuso <pablo@netfilter.org>
Cc: Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
Cc: Lauro Ramos Venancio <lauro.venancio@openbossa.org>
Cc: Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
Cc: Samuel Ortiz <sameo@linux.intel.com>
Cc: Jon Maloy <jon.maloy@ericsson.com>
Cc: Allan Stephens <allan.stephens@windriver.com>
Cc: Andrew Hendry <andrew.hendry@gmail.com>
Cc: Eric Dumazet <edumazet@google.com>
Reviewed-by: Christoph Paasch <christoph.paasch@uclouvain.be>
Signed-off-by: Octavian Purdila <octavian.purdila@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit bad93e9d4e
parent 1a0b20b257
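For context, the caller-side pattern this patch targets looks roughly like the sketch below. This is an illustrative, kernel-style fragment rather than code from the patch: the helper xmit_two_ways() and its error handling are hypothetical; only pskb_copy_for_clone(), skb_clone() and kfree_skb() are the real APIs involved.

#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Hypothetical helper: make one private copy of @skb and clone it right
 * away, which is exactly the pattern pskb_copy_for_clone() is meant to
 * speed up.
 */
static int xmit_two_ways(struct sk_buff *skb)
{
	struct sk_buff *copy, *clone;

	/* Old pattern: the copy comes from the regular head cache, so the
	 * skb_clone() below must allocate its sk_buff separately:
	 *
	 *	copy = pskb_copy(skb, GFP_ATOMIC);
	 *
	 * New pattern: allocate the copy from the fclone cache, so the
	 * immediately following skb_clone() can use the companion fclone
	 * slot instead of a fresh allocation.
	 */
	copy = pskb_copy_for_clone(skb, GFP_ATOMIC);
	if (!copy)
		return -ENOMEM;

	clone = skb_clone(copy, GFP_ATOMIC);
	if (!clone) {
		kfree_skb(copy);
		return -ENOMEM;
	}

	/* ... hand "copy" and "clone" to two different transmit paths ... */
	kfree_skb(copy);
	kfree_skb(clone);
	return 0;
}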
@@ -744,7 +744,13 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
-struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask);
+struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
+				   gfp_t gfp_mask, bool fclone);
+static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
+					  gfp_t gfp_mask)
+{
+	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
+}
+
 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
@@ -2238,6 +2244,14 @@ static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
 	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
 }
 
+
+static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
+						  gfp_t gfp_mask)
+{
+	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
+}
+
+
 /**
  *	skb_clone_writable - is the header of a clone writable
  *	@skb: buffer to check
@@ -594,7 +594,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
 		if (!neigh_node)
 			goto free_orig;
 
-		tmp_skb = pskb_copy(skb, GFP_ATOMIC);
+		tmp_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
 		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb,
 							   cand[i].orig_node,
 							   packet_subtype)) {
@@ -1344,7 +1344,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
 	struct ethhdr *ethhdr;
 
 	/* Copy skb header to change the mac header */
-	skb = pskb_copy(skb, GFP_ATOMIC);
+	skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
 	if (!skb)
 		return;
 
@@ -143,7 +143,7 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 
 		if (!skb_copy) {
 			/* Create a private copy with headroom */
-			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
+			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
 			if (!skb_copy)
 				continue;
 
@@ -247,8 +247,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 		struct hci_mon_hdr *hdr;
 
 		/* Create a private copy with headroom */
-		skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
-				       GFP_ATOMIC);
+		skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
+					      GFP_ATOMIC, true);
 		if (!skb_copy)
 			continue;
 
@@ -951,10 +951,13 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 EXPORT_SYMBOL(skb_copy);
 
 /**
- *	__pskb_copy - create copy of an sk_buff with private head.
+ *	__pskb_copy_fclone - create copy of an sk_buff with private head.
  *	@skb: buffer to copy
  *	@headroom: headroom of new skb
  *	@gfp_mask: allocation priority
+ *	@fclone: if true allocate the copy of the skb from the fclone
+ *	cache instead of the head cache; it is recommended to set this
+ *	to true for the cases where the copy will likely be cloned
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
@@ -964,11 +967,12 @@ EXPORT_SYMBOL(skb_copy);
  *	The returned buffer has a reference count of 1.
  */
 
-struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
+struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
+				   gfp_t gfp_mask, bool fclone)
 {
 	unsigned int size = skb_headlen(skb) + headroom;
-	struct sk_buff *n = __alloc_skb(size, gfp_mask,
-					skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
+	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
 
 	if (!n)
 		goto out;
@@ -1008,7 +1012,7 @@ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
 out:
 	return n;
 }
-EXPORT_SYMBOL(__pskb_copy);
+EXPORT_SYMBOL(__pskb_copy_fclone);
 
 /**
  *	pskb_expand_head - reallocate header of &sk_buff
@@ -680,8 +680,8 @@ void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
 			continue;
 
 		if (skb_copy == NULL) {
-			skb_copy = __pskb_copy(skb, NFC_RAW_HEADER_SIZE,
-					       GFP_ATOMIC);
+			skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE,
+						      GFP_ATOMIC, true);
 
 			if (skb_copy == NULL)
 				continue;
@@ -378,8 +378,8 @@ void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb,
 
 	sk_for_each(sk, &raw_sk_list.head) {
 		if (!skb_copy) {
-			skb_copy = __pskb_copy(skb, NFC_RAW_HEADER_SIZE,
-					       GFP_ATOMIC);
+			skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE,
+						      GFP_ATOMIC, true);
 			if (!skb_copy)
 				continue;
 
@@ -653,7 +653,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
 			tipc_bearer_send(b->identity, buf, &b->bcast_addr);
 		} else {
 			/* Avoid concurrent buffer access */
-			tbuf = pskb_copy(buf, GFP_ATOMIC);
+			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
 			if (!tbuf)
 				break;
 			tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);