linux-next/include/net/cls_cgroup.h
Daniel Borkmann · b87a173e25 · cls_cgroup: factor out classid retrieval
Split out retrieval of the cgroup's net_cls classid into its own
function, so that it can be reused later on from other parts of
the traffic control subsystem. If there is no skb->sk, the small
helper returns 0 as well, which in cls_cgroup terms means 'could not
classify'.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-07-20 12:41:30 -07:00
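
For illustration, here is a minimal sketch (not the in-tree cls_cgroup implementation) of how a traffic control classifier's classify hook could reuse the new helper. The function name example_classify and the "no match" return convention shown here are assumptions; only task_get_classid() and the tcf_result fields come from the kernel headers.

/* Hypothetical sketch: a classifier consuming task_get_classid().
 * A return value of 0 is treated as "could not classify", matching
 * the semantics described in the commit message above.
 */
#include <net/cls_cgroup.h>
#include <net/pkt_cls.h>

static int example_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                            struct tcf_result *res)
{
        u32 classid = task_get_classid(skb);

        if (!classid)
                return -1;      /* could not classify */

        res->classid = classid;
        res->class = 0;
        return 0;               /* illustrative "match" result */
}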


/*
 * cls_cgroup.h                 Control Group Classifier
 *
 * Authors:     Thomas Graf <tgraf@suug.ch>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _NET_CLS_CGROUP_H
#define _NET_CLS_CGROUP_H

#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>
#include <net/sock.h>

#ifdef CONFIG_CGROUP_NET_CLASSID
struct cgroup_cls_state {
        struct cgroup_subsys_state css;
        u32 classid;
};

struct cgroup_cls_state *task_cls_state(struct task_struct *p);

static inline u32 task_cls_classid(struct task_struct *p)
{
        u32 classid;

        if (in_interrupt())
                return 0;

        rcu_read_lock();
        classid = container_of(task_css(p, net_cls_cgrp_id),
                               struct cgroup_cls_state, css)->classid;
        rcu_read_unlock();

        return classid;
}

static inline void sock_update_classid(struct sock *sk)
{
        u32 classid;

        classid = task_cls_classid(current);
        if (classid != sk->sk_classid)
                sk->sk_classid = classid;
}

static inline u32 task_get_classid(const struct sk_buff *skb)
{
        u32 classid = task_cls_state(current)->classid;

        /* Due to the nature of the classifier it is required to ignore all
         * packets originating from softirq context as accessing `current'
         * would lead to false results.
         *
         * This test assumes that all callers of dev_queue_xmit() explicitly
         * disable bh. Knowing this, it is possible to detect softirq based
         * calls by looking at the number of nested bh disable calls because
         * softirqs always disables bh.
         */
        if (in_serving_softirq()) {
                /* If there is an sk_classid we'll use that. */
                if (!skb->sk)
                        return 0;

                classid = skb->sk->sk_classid;
        }

        return classid;
}
#else /* !CONFIG_CGROUP_NET_CLASSID */
static inline void sock_update_classid(struct sock *sk)
{
}

static inline u32 task_get_classid(const struct sk_buff *skb)
{
        return 0;
}
#endif /* CONFIG_CGROUP_NET_CLASSID */
#endif /* _NET_CLS_CGROUP_H */
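
As a brief usage note: task_get_classid() can fall back to skb->sk->sk_classid in softirq context only because the classid was cached on the socket earlier from process context. The sketch below shows a hypothetical process-context call site for sock_update_classid(); the function name example_socket_created is an assumption, not the actual in-tree hook.

/* Hypothetical sketch: caching the opening task's classid on the socket
 * from process context, so that task_get_classid() has a valid value to
 * read in softirq context where `current' cannot be trusted.
 */
#include <net/cls_cgroup.h>
#include <net/sock.h>

static void example_socket_created(struct sock *sk)
{
        /* Must run in process context: task_cls_classid() returns 0 when
         * called from interrupt context.
         */
        sock_update_classid(sk);
}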