mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-25 13:43:55 +08:00
b013840810
In PF_PACKET's packet mmap(), we can avoid using one atomic_inc() and one atomic_dec() call in skb destructor and use a percpu reference count instead in order to determine if packets are still pending to be sent out. Micro-benchmark with [1] that has been slightly modified (that is, protocol = 0 in socket(2) and bind(2)), example on a rather crappy testing machine; I expect it to scale and have even better results on bigger machines: ./packet_mm_tx -s7000 -m7200 -z700000 em1, avg over 2500 runs: With patch: 4,022,015 cyc Without patch: 4,812,994 cyc time ./packet_mm_tx -s64 -c10000000 em1 > /dev/null, stable: With patch: real 1m32.241s user 0m0.287s sys 1m29.316s Without patch: real 1m38.386s user 0m0.265s sys 1m35.572s In function tpacket_snd(), it is okay to use packet_read_pending() since in fast-path we short-circuit the condition already with ph != NULL, since we have next frames to process. In case we have MSG_DONTWAIT, we also do not execute this path as need_wait is false here anyway, and in case of _no_ MSG_DONTWAIT flag, it is okay to call a packet_read_pending(), because when we ever reach that path, we're done processing outgoing frames anyway and only look if there are skbs still outstanding to be orphaned. We can stay lockless in this percpu counter since it's acceptable when we reach this path for the sum to be imprecise first, but we'll level out at 0 after all pending frames have reached the skb destructor eventually through tx reclaim. When people pin a tx process to particular CPUs, we expect overflows to happen in the reference counter as on one CPU we expect heavy increase; and distributed through ksoftirqd on all CPUs a decrease, for example. As David Laight points out, since the C language doesn't define the result of signed int overflow (i.e. rather than wrap, it is allowed to saturate as a possible outcome), we have to use unsigned int as reference count. The sum over all CPUs when tx is complete will result in 0 again.
The BUG_ON() in tpacket_destruct_skb() we can remove as well. It can _only_ be set from inside tpacket_snd() path and we made sure to increase tx_ring.pending in any case before we called po->xmit(skb). So testing for tx_ring.pending == 0 is not too useful. Instead, it would rather have been useful to test if lower layers didn't orphan the skb so that we're missing ring slots being put back to TP_STATUS_AVAILABLE. But such a bug will be caught in user space already as we end up realizing that we do not have any TP_STATUS_AVAILABLE slots left anymore. Therefore, we're all set. Btw, in case of RX_RING path, we do not make use of the pending member, therefore we also don't need to use up any percpu memory here. Also note that __alloc_percpu() already returns a zero-filled percpu area, so initialization is done already. [1] http://wiki.ipxwarzone.com/index.php5?title=Linux_packet_mmap Signed-off-by: Daniel Borkmann <dborkman@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
260 lines
6.1 KiB
C
260 lines
6.1 KiB
C
#include <linux/module.h>
|
|
#include <linux/sock_diag.h>
|
|
#include <linux/net.h>
|
|
#include <linux/netdevice.h>
|
|
#include <linux/packet_diag.h>
|
|
#include <linux/percpu.h>
|
|
#include <net/net_namespace.h>
|
|
#include <net/sock.h>
|
|
|
|
#include "internal.h"
|
|
|
|
static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
|
|
{
|
|
struct packet_diag_info pinfo;
|
|
|
|
pinfo.pdi_index = po->ifindex;
|
|
pinfo.pdi_version = po->tp_version;
|
|
pinfo.pdi_reserve = po->tp_reserve;
|
|
pinfo.pdi_copy_thresh = po->copy_thresh;
|
|
pinfo.pdi_tstamp = po->tp_tstamp;
|
|
|
|
pinfo.pdi_flags = 0;
|
|
if (po->running)
|
|
pinfo.pdi_flags |= PDI_RUNNING;
|
|
if (po->auxdata)
|
|
pinfo.pdi_flags |= PDI_AUXDATA;
|
|
if (po->origdev)
|
|
pinfo.pdi_flags |= PDI_ORIGDEV;
|
|
if (po->has_vnet_hdr)
|
|
pinfo.pdi_flags |= PDI_VNETHDR;
|
|
if (po->tp_loss)
|
|
pinfo.pdi_flags |= PDI_LOSS;
|
|
|
|
return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
|
|
}
|
|
|
|
static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
|
|
{
|
|
struct nlattr *mca;
|
|
struct packet_mclist *ml;
|
|
|
|
mca = nla_nest_start(nlskb, PACKET_DIAG_MCLIST);
|
|
if (!mca)
|
|
return -EMSGSIZE;
|
|
|
|
rtnl_lock();
|
|
for (ml = po->mclist; ml; ml = ml->next) {
|
|
struct packet_diag_mclist *dml;
|
|
|
|
dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
|
|
if (!dml) {
|
|
rtnl_unlock();
|
|
nla_nest_cancel(nlskb, mca);
|
|
return -EMSGSIZE;
|
|
}
|
|
|
|
dml->pdmc_index = ml->ifindex;
|
|
dml->pdmc_type = ml->type;
|
|
dml->pdmc_alen = ml->alen;
|
|
dml->pdmc_count = ml->count;
|
|
BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
|
|
memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
|
|
}
|
|
|
|
rtnl_unlock();
|
|
nla_nest_end(nlskb, mca);
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
|
|
struct sk_buff *nlskb)
|
|
{
|
|
struct packet_diag_ring pdr;
|
|
|
|
if (!ring->pg_vec || ((ver > TPACKET_V2) &&
|
|
(nl_type == PACKET_DIAG_TX_RING)))
|
|
return 0;
|
|
|
|
pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
|
|
pdr.pdr_block_nr = ring->pg_vec_len;
|
|
pdr.pdr_frame_size = ring->frame_size;
|
|
pdr.pdr_frame_nr = ring->frame_max + 1;
|
|
|
|
if (ver > TPACKET_V2) {
|
|
pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
|
|
pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
|
|
pdr.pdr_features = ring->prb_bdqc.feature_req_word;
|
|
} else {
|
|
pdr.pdr_retire_tmo = 0;
|
|
pdr.pdr_sizeof_priv = 0;
|
|
pdr.pdr_features = 0;
|
|
}
|
|
|
|
return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
|
|
}
|
|
|
|
static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
|
|
{
|
|
int ret;
|
|
|
|
mutex_lock(&po->pg_vec_lock);
|
|
ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
|
|
PACKET_DIAG_RX_RING, skb);
|
|
if (!ret)
|
|
ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
|
|
PACKET_DIAG_TX_RING, skb);
|
|
mutex_unlock(&po->pg_vec_lock);
|
|
|
|
return ret;
|
|
}
|
|
|
|
static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
|
|
{
|
|
int ret = 0;
|
|
|
|
mutex_lock(&fanout_mutex);
|
|
if (po->fanout) {
|
|
u32 val;
|
|
|
|
val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
|
|
ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
|
|
}
|
|
mutex_unlock(&fanout_mutex);
|
|
|
|
return ret;
|
|
}
|
|
|
|
/* Fill one netlink message describing the AF_PACKET socket @sk.
 *
 * The message starts with a fixed struct packet_diag_msg header and is
 * followed by the optional attribute groups selected by the dumper's
 * req->pdiag_show bitmask.  @portid/@seq/@flags address the netlink
 * reply; @sk_ino is the socket's inode number as shown to userspace.
 *
 * Returns the result of nlmsg_end() when everything fit into @skb, or
 * -EMSGSIZE after cancelling the partially built message otherwise.
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct packet_diag_req *req,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct packet_diag_msg *rp;
	struct packet_sock *po = pkt_sk(sk);

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
	if (!nlh)
		return -EMSGSIZE;

	rp = nlmsg_data(nlh);
	rp->pdiag_family = AF_PACKET;
	rp->pdiag_type = sk->sk_type;
	/* po->num is the bound protocol in network byte order; report it
	 * in host byte order.
	 */
	rp->pdiag_num = ntohs(po->num);
	rp->pdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rp->pdiag_cookie);

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    pdiag_put_info(po, skb))
		goto out_nlmsg_trim;

	/* NOTE(review): the UID attribute is gated on PACKET_SHOW_INFO
	 * rather than a dedicated show flag — looks intentional, but
	 * confirm against the packet_diag uapi header.
	 */
	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    nla_put_u32(skb, PACKET_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
	    pdiag_put_mclist(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
	    pdiag_put_rings_cfg(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
	    pdiag_put_fanout(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, PACKET_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
	    sock_diag_put_filterinfo(user_ns, sk, skb, PACKET_DIAG_FILTER))
		goto out_nlmsg_trim;

	return nlmsg_end(skb, nlh);

out_nlmsg_trim:
	/* Undo the whole message; the dumper will retry with a new skb. */
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
|
|
|
|
/* Netlink dump callback: walk this netns' list of packet sockets and
 * emit one diag message per socket into @skb.
 *
 * cb->args[0] holds the number of list entries already handled, so a
 * multi-part dump resumes where the previous skb filled up.  Returns
 * skb->len, which tells the netlink core whether the dump produced
 * data (and hence whether to call us again).
 */
static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int num = 0, s_num = cb->args[0];
	struct packet_diag_req *req;
	struct net *net;
	struct sock *sk;

	net = sock_net(skb->sk);
	req = nlmsg_data(cb->nlh);

	mutex_lock(&net->packet.sklist_lock);
	sk_for_each(sk, &net->packet.sklist) {
		if (!net_eq(sock_net(sk), net))
			continue;
		/* Skip entries already emitted in a previous round. */
		if (num < s_num)
			goto next;

		/* A negative return means @skb is full; stop here and
		 * record the position so the next call resumes at @num.
		 */
		if (sk_diag_fill(sk, skb, req,
				 sk_user_ns(NETLINK_CB(cb->skb).sk),
				 NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, NLM_F_MULTI,
				 sock_i_ino(sk)) < 0)
			goto done;
next:
		num++;
	}
done:
	mutex_unlock(&net->packet.sklist_lock);
	cb->args[0] = num;

	return skb->len;
}
|
|
|
|
static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
|
|
{
|
|
int hdrlen = sizeof(struct packet_diag_req);
|
|
struct net *net = sock_net(skb->sk);
|
|
struct packet_diag_req *req;
|
|
|
|
if (nlmsg_len(h) < hdrlen)
|
|
return -EINVAL;
|
|
|
|
req = nlmsg_data(h);
|
|
/* Make it possible to support protocol filtering later */
|
|
if (req->sdiag_protocol)
|
|
return -EINVAL;
|
|
|
|
if (h->nlmsg_flags & NLM_F_DUMP) {
|
|
struct netlink_dump_control c = {
|
|
.dump = packet_diag_dump,
|
|
};
|
|
return netlink_dump_start(net->diag_nlsk, skb, h, &c);
|
|
} else
|
|
return -EOPNOTSUPP;
|
|
}
|
|
|
|
/* Glue for the generic sock_diag multiplexer: routes AF_PACKET
 * SOCK_DIAG_BY_FAMILY requests to packet_diag_handler_dump().
 */
static const struct sock_diag_handler packet_diag_handler = {
	.family = AF_PACKET,
	.dump = packet_diag_handler_dump,
};
|
|
|
|
/* Module init: register the AF_PACKET handler with the sock_diag core. */
static int __init packet_diag_init(void)
{
	return sock_diag_register(&packet_diag_handler);
}
|
|
|
|
/* Module exit: unregister the AF_PACKET handler again. */
static void __exit packet_diag_exit(void)
{
	sock_diag_unregister(&packet_diag_handler);
}
|
|
|
|
module_init(packet_diag_init);
module_exit(packet_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload this module when a NETLINK_SOCK_DIAG request arrives for
 * address family 17 (AF_PACKET).
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);
|