Merge branch 'snmp-stats-update'
Eric Dumazet says:
====================
net: snmp: update SNMP methods
In the old days (before linux-3.0), SNMP counters were duplicated,
one set for user context, and another one for BH context.
After commit 8f0ea0fe3a
("snmp: reduce percpu needs by 50%")
we have a single copy, and what really matters is whether preemption
is enabled or disabled, since we use this_cpu_inc() or __this_cpu_inc()
respectively.
This patch series kills the obsolete STATS_USER() helpers and
renames all XXX_BH() helpers to __XXX() ones, to more closely
match the conventions used to update per-cpu variables.
This is probably going to hurt maintainers' jobs for a while,
since cherry-picks will not be clean, but this had to be
cleaned up at some point. I am so sorry, guys.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 210732d16d
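For readers unfamiliar with the per-cpu convention being adopted, here is a
minimal kernel-style sketch (the demo_mib/demo_statistics names are
hypothetical illustrations, not part of this series) of why the unprefixed
helpers map to this_cpu_inc() while the new __XXX() helpers map to
__this_cpu_inc():

	#include <linux/percpu.h>

	/* Hypothetical per-cpu MIB, mirroring the SNMP mib layout. */
	struct demo_mib {
		unsigned long	mibs[4];
	};
	static DEFINE_PER_CPU(struct demo_mib, demo_statistics);

	/* Any context: this_cpu_inc() is safe even with preemption
	 * enabled, so this matches the unprefixed helpers
	 * (e.g. NET_INC_STATS()). */
	static void demo_inc(int field)
	{
		this_cpu_inc(demo_statistics.mibs[field]);
	}

	/* Caller already runs with preemption disabled (e.g. BH/softirq
	 * context): __this_cpu_inc() skips the extra protection, matching
	 * the new __XXX() helpers (e.g. __NET_INC_STATS()). */
	static void __demo_inc(int field)
	{
		__this_cpu_inc(demo_statistics.mibs[field]);
	}

The rename therefore changes only the spelling: the BH-context fast path
keeps using __this_cpu_inc(), and now carries the same __ prefix that the
per-cpu API itself uses.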
@@ -30,9 +30,9 @@ struct icmp_err {
 
 extern const struct icmp_err icmp_err_convert[];
 #define ICMP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.icmp_statistics, field)
-#define ICMP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
+#define __ICMP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.icmp_statistics, field)
 #define ICMPMSGOUT_INC_STATS(net, field)	SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
-#define ICMPMSGIN_INC_STATS_BH(net, field)	SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
+#define ICMPMSGIN_INC_STATS(net, field)	SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
 
 struct dst_entry;
 struct net_proto_family;
@@ -187,17 +187,15 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 			   unsigned int len);
 
 #define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
-#define IP_INC_STATS_BH(net, field)	SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field)
+#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
 #define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
-#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
 #define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
-#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
 #define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
-#define NET_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
-#define NET_INC_STATS_USER(net, field)	SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
+#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
 #define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
+#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
 
 u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
 unsigned long snmp_fold_field(void __percpu *mib, int offt);
@@ -121,21 +121,21 @@ struct frag_hdr {
 extern int sysctl_mld_max_msf;
 extern int sysctl_mld_qrv;
 
-#define _DEVINC(net, statname, modifier, idev, field)			\
+#define _DEVINC(net, statname, mod, idev, field)			\
 ({									\
 	struct inet6_dev *_idev = (idev);				\
 	if (likely(_idev != NULL))					\
-		SNMP_INC_STATS##modifier((_idev)->stats.statname, (field)); \
-	SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
+		mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\
+	mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\
 })
 
 /* per device counters are atomic_long_t */
-#define _DEVINCATOMIC(net, statname, modifier, idev, field)		\
+#define _DEVINCATOMIC(net, statname, mod, idev, field)			\
 ({									\
 	struct inet6_dev *_idev = (idev);				\
 	if (likely(_idev != NULL))					\
 		SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
-	SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
+	mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\
 })
 
 /* per device and per net counters are atomic_long_t */
@@ -147,46 +147,44 @@ extern int sysctl_mld_qrv;
 	SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
 })
 
-#define _DEVADD(net, statname, modifier, idev, field, val)		\
+#define _DEVADD(net, statname, mod, idev, field, val)			\
 ({									\
 	struct inet6_dev *_idev = (idev);				\
 	if (likely(_idev != NULL))					\
-		SNMP_ADD_STATS##modifier((_idev)->stats.statname, (field), (val)); \
-	SNMP_ADD_STATS##modifier((net)->mib.statname##_statistics, (field), (val));\
+		mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
+	mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
 })
 
-#define _DEVUPD(net, statname, modifier, idev, field, val)		\
+#define _DEVUPD(net, statname, mod, idev, field, val)			\
 ({									\
 	struct inet6_dev *_idev = (idev);				\
 	if (likely(_idev != NULL))					\
-		SNMP_UPD_PO_STATS##modifier((_idev)->stats.statname, field, (val)); \
-	SNMP_UPD_PO_STATS##modifier((net)->mib.statname##_statistics, field, (val));\
+		mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
+	mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
 })
 
 /* MIBs */
 
 #define IP6_INC_STATS(net, idev,field)		\
-		_DEVINC(net, ipv6, 64, idev, field)
-#define IP6_INC_STATS_BH(net, idev,field)	\
-		_DEVINC(net, ipv6, 64_BH, idev, field)
+		_DEVINC(net, ipv6, , idev, field)
+#define __IP6_INC_STATS(net, idev,field)	\
+		_DEVINC(net, ipv6, __, idev, field)
 #define IP6_ADD_STATS(net, idev,field,val)	\
-		_DEVADD(net, ipv6, 64, idev, field, val)
-#define IP6_ADD_STATS_BH(net, idev,field,val)	\
-		_DEVADD(net, ipv6, 64_BH, idev, field, val)
+		_DEVADD(net, ipv6, , idev, field, val)
+#define __IP6_ADD_STATS(net, idev,field,val)	\
+		_DEVADD(net, ipv6, __, idev, field, val)
 #define IP6_UPD_PO_STATS(net, idev,field,val)	\
-		_DEVUPD(net, ipv6, 64, idev, field, val)
-#define IP6_UPD_PO_STATS_BH(net, idev,field,val)	\
-		_DEVUPD(net, ipv6, 64_BH, idev, field, val)
+		_DEVUPD(net, ipv6, , idev, field, val)
+#define __IP6_UPD_PO_STATS(net, idev,field,val)	\
+		_DEVUPD(net, ipv6, __, idev, field, val)
 #define ICMP6_INC_STATS(net, idev, field)	\
 		_DEVINCATOMIC(net, icmpv6, , idev, field)
-#define ICMP6_INC_STATS_BH(net, idev, field)	\
-		_DEVINCATOMIC(net, icmpv6, _BH, idev, field)
+#define __ICMP6_INC_STATS(net, idev, field)	\
+		_DEVINCATOMIC(net, icmpv6, __, idev, field)
 
 #define ICMP6MSGOUT_INC_STATS(net, idev, field)		\
 	_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
-#define ICMP6MSGOUT_INC_STATS_BH(net, idev, field)	\
-	_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
-#define ICMP6MSGIN_INC_STATS_BH(net, idev, field)	\
+#define ICMP6MSGIN_INC_STATS(net, idev, field)	\
 	_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
 
 struct ip6_ra_chain {
@@ -205,10 +205,9 @@ extern int sysctl_sctp_wmem[3];
  */
 
 /* SCTP SNMP MIB stats handlers */
-#define SCTP_INC_STATS(net, field)	SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
-#define SCTP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
-#define SCTP_INC_STATS_USER(net, field)	SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
-#define SCTP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS(net, field)	SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define __SCTP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
 
 /* sctp mib definitions */
 enum {
@@ -123,12 +123,9 @@ struct linux_xfrm_mib {
 #define DECLARE_SNMP_STAT(type, name)	\
 	extern __typeof__(type) __percpu *name
 
-#define SNMP_INC_STATS_BH(mib, field)	\
+#define __SNMP_INC_STATS(mib, field)	\
 			__this_cpu_inc(mib->mibs[field])
 
-#define SNMP_INC_STATS_USER(mib, field)	\
-			this_cpu_inc(mib->mibs[field])
-
 #define SNMP_INC_STATS_ATOMIC_LONG(mib, field)	\
 			atomic_long_inc(&mib->mibs[field])
 
@@ -138,12 +135,9 @@ struct linux_xfrm_mib {
 #define SNMP_DEC_STATS(mib, field)	\
 			this_cpu_dec(mib->mibs[field])
 
-#define SNMP_ADD_STATS_BH(mib, field, addend)	\
+#define __SNMP_ADD_STATS(mib, field, addend)	\
 			__this_cpu_add(mib->mibs[field], addend)
 
-#define SNMP_ADD_STATS_USER(mib, field, addend)	\
-			this_cpu_add(mib->mibs[field], addend)
-
 #define SNMP_ADD_STATS(mib, field, addend)	\
 			this_cpu_add(mib->mibs[field], addend)
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)	\
@@ -152,7 +146,7 @@ struct linux_xfrm_mib {
 		this_cpu_inc(ptr[basefield##PKTS]);		\
 		this_cpu_add(ptr[basefield##OCTETS], addend);	\
 	} while (0)
-#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)	\
+#define __SNMP_UPD_PO_STATS(mib, basefield, addend)	\
 	do { \
 		__typeof__((mib->mibs) + 0) ptr = mib->mibs;	\
 		__this_cpu_inc(ptr[basefield##PKTS]);		\
@@ -162,7 +156,7 @@ struct linux_xfrm_mib {
 
 #if BITS_PER_LONG==32
 
-#define SNMP_ADD_STATS64_BH(mib, field, addend)				\
+#define __SNMP_ADD_STATS64(mib, field, addend)				\
 	do {								\
 		__typeof__(*mib) *ptr = raw_cpu_ptr(mib);		\
 		u64_stats_update_begin(&ptr->syncp);		\
@@ -170,20 +164,16 @@ struct linux_xfrm_mib {
 		u64_stats_update_end(&ptr->syncp);		\
 	} while (0)
 
-#define SNMP_ADD_STATS64_USER(mib, field, addend)			\
+#define SNMP_ADD_STATS64(mib, field, addend)				\
 	do {								\
-		local_bh_disable();					\
-		SNMP_ADD_STATS64_BH(mib, field, addend);		\
-		local_bh_enable();					\
+		preempt_disable();					\
+		__SNMP_ADD_STATS64(mib, field, addend);			\
+		preempt_enable();					\
 	} while (0)
 
-#define SNMP_ADD_STATS64(mib, field, addend)				\
-		SNMP_ADD_STATS64_USER(mib, field, addend)
-
-#define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1)
-#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1)
+#define __SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
 #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
-#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend)			\
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend)			\
 	do {								\
 		__typeof__(*mib) *ptr;					\
 		ptr = raw_cpu_ptr((mib));				\
@@ -194,20 +184,18 @@ struct linux_xfrm_mib {
 	} while (0)
 #define SNMP_UPD_PO_STATS64(mib, basefield, addend)			\
 	do {								\
-		local_bh_disable();					\
-		SNMP_UPD_PO_STATS64_BH(mib, basefield, addend);		\
-		local_bh_enable();					\
+		preempt_disable();					\
+		__SNMP_UPD_PO_STATS64(mib, basefield, addend);		\
+		preempt_enable();					\
 	} while (0)
 #else
-#define SNMP_INC_STATS64_BH(mib, field)		SNMP_INC_STATS_BH(mib, field)
-#define SNMP_INC_STATS64_USER(mib, field)	SNMP_INC_STATS_USER(mib, field)
+#define __SNMP_INC_STATS64(mib, field)		__SNMP_INC_STATS(mib, field)
 #define SNMP_INC_STATS64(mib, field)		SNMP_INC_STATS(mib, field)
 #define SNMP_DEC_STATS64(mib, field)		SNMP_DEC_STATS(mib, field)
-#define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend)
-#define SNMP_ADD_STATS64_USER(mib, field, addend) SNMP_ADD_STATS_USER(mib, field, addend)
+#define __SNMP_ADD_STATS64(mib, field, addend) __SNMP_ADD_STATS(mib, field, addend)
#define SNMP_ADD_STATS64(mib, field, addend) SNMP_ADD_STATS(mib, field, addend)
 #define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend)
-#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend)
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) __SNMP_UPD_PO_STATS(mib, basefield, addend)
 #endif
 
 #endif
@@ -332,9 +332,8 @@ bool tcp_check_oom(struct sock *sk, int shift);
 extern struct proto tcp_prot;
 
 #define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
+#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 #define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 
 void tcp_tasklet_init(void);
@@ -1298,10 +1297,10 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
 static inline void tcp_mib_init(struct net *net)
 {
 	/* See RFC 2012 */
-	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
-	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
-	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
-	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
+	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
+	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
+	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
 }
 
 /* from STCP */
@@ -1744,7 +1743,7 @@ static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
 					 __u16 *mss)
 {
 	tcp_synq_overflow(sk);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
 	return ops->cookie_init_seq(skb, mss);
 }
 #else
@@ -1853,7 +1852,7 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
 static inline void tcp_listendrop(const struct sock *sk)
 {
 	atomic_inc(&((struct sock *)sk)->sk_drops);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 }
 
 #endif	/* _TCP_H */
@@ -289,32 +289,32 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
 /*
  *	SNMP statistics for UDP and UDP-Lite
  */
-#define UDP_INC_STATS_USER(net, field, is_udplite)	      do { \
-	if (is_udplite) SNMP_INC_STATS_USER((net)->mib.udplite_statistics, field); \
-	else		SNMP_INC_STATS_USER((net)->mib.udp_statistics, field); } while(0)
-#define UDP_INC_STATS_BH(net, field, is_udplite)	      do { \
-	if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_statistics, field); \
-	else		SNMP_INC_STATS_BH((net)->mib.udp_statistics, field); } while(0)
+#define UDP_INC_STATS(net, field, is_udplite)		      do { \
+	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
+	else		SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
+#define __UDP_INC_STATS(net, field, is_udplite)		      do { \
+	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
+	else		__SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
 
-#define UDP6_INC_STATS_BH(net, field, is_udplite)	    do { \
-	if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_stats_in6, field);\
-	else		SNMP_INC_STATS_BH((net)->mib.udp_stats_in6, field); \
+#define __UDP6_INC_STATS(net, field, is_udplite)	    do { \
+	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
+	else		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
 } while(0)
-#define UDP6_INC_STATS_USER(net, field, __lite)		    do { \
-	if (__lite) SNMP_INC_STATS_USER((net)->mib.udplite_stats_in6, field); \
-	else	    SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field); \
+#define UDP6_INC_STATS(net, field, __lite)		    do { \
+	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
+	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
 } while(0)
 
 #if IS_ENABLED(CONFIG_IPV6)
-#define UDPX_INC_STATS_BH(sk, field)					\
+#define __UDPX_INC_STATS(sk, field)					\
 do {									\
 	if ((sk)->sk_family == AF_INET)					\
-		UDP_INC_STATS_BH(sock_net(sk), field, 0);		\
+		__UDP_INC_STATS(sock_net(sk), field, 0);		\
 	else								\
-		UDP6_INC_STATS_BH(sock_net(sk), field, 0);		\
+		__UDP6_INC_STATS(sock_net(sk), field, 0);		\
 } while (0)
 #else
-#define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(sock_net(sk), field, 0)
+#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
 #endif
 
 /* /proc */
@@ -45,12 +45,8 @@
 
 #ifdef CONFIG_XFRM_STATISTICS
 #define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_USER(net, field)	SNMP_INC_STATS_USER((net)-mib.xfrm_statistics, field)
 #else
 #define XFRM_INC_STATS(net, field)	((void)(net))
-#define XFRM_INC_STATS_BH(net, field)	((void)(net))
-#define XFRM_INC_STATS_USER(net, field)	((void)(net))
 #endif
 
 
@@ -217,13 +217,13 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
 
 	len = ntohs(iph->tot_len);
 	if (skb->len < len) {
-		IP_INC_STATS_BH(net, IPSTATS_MIB_INTRUNCATEDPKTS);
+		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
 		goto drop;
 	} else if (len < (iph->ihl*4))
 		goto inhdr_error;
 
 	if (pskb_trim_rcsum(skb, len)) {
-		IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
+		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 
@@ -236,7 +236,7 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
 	return 0;
 
 inhdr_error:
-	IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
+	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 drop:
 	return -1;
 }
@@ -122,13 +122,13 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
 
 	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
 		if (pkt_len + ip6h_len > skb->len) {
-			IP6_INC_STATS_BH(net, idev,
-					 IPSTATS_MIB_INTRUNCATEDPKTS);
+			__IP6_INC_STATS(net, idev,
+					IPSTATS_MIB_INTRUNCATEDPKTS);
 			goto drop;
 		}
 		if (pskb_trim_rcsum(skb, pkt_len + ip6h_len)) {
-			IP6_INC_STATS_BH(net, idev,
-					 IPSTATS_MIB_INDISCARDS);
+			__IP6_INC_STATS(net, idev,
+					IPSTATS_MIB_INDISCARDS);
 			goto drop;
 		}
 	}
@@ -142,7 +142,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
 	return 0;
 
 inhdr_error:
-	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
+	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 drop:
 	return -1;
 }
@@ -4982,8 +4982,8 @@ bool sk_busy_loop(struct sock *sk, int nonblock)
 			netpoll_poll_unlock(have);
 		}
 		if (rc > 0)
-			NET_ADD_STATS_BH(sock_net(sk),
-					 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+			__NET_ADD_STATS(sock_net(sk),
+					LINUX_MIB_BUSYPOLLRXPACKETS, rc);
 		local_bh_enable();
 
 		if (rc == LL_FLUSH_FAILED)
@@ -198,9 +198,9 @@ struct dccp_mib {
 };
 
 DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
-#define DCCP_INC_STATS(field)	    SNMP_INC_STATS(dccp_statistics, field)
-#define DCCP_INC_STATS_BH(field)    SNMP_INC_STATS_BH(dccp_statistics, field)
-#define DCCP_DEC_STATS(field)	    SNMP_DEC_STATS(dccp_statistics, field)
+#define DCCP_INC_STATS(field)	SNMP_INC_STATS(dccp_statistics, field)
+#define __DCCP_INC_STATS(field)	__SNMP_INC_STATS(dccp_statistics, field)
+#define DCCP_DEC_STATS(field)	SNMP_DEC_STATS(dccp_statistics, field)
 
 /*
  *	Checksumming routines
@@ -359,7 +359,7 @@ send_sync:
 		goto discard;
 	}
 
-	DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
+	__DCCP_INC_STATS(DCCP_MIB_INERRS);
 discard:
 	__kfree_skb(skb);
 	return 0;
@@ -205,7 +205,7 @@ void dccp_req_err(struct sock *sk, u64 seq)
 	 * socket here.
 	 */
 	if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 	} else {
 		/*
 		 * Still in RESPOND, just remove it silently.
@@ -247,7 +247,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 
 	if (skb->len < offset + sizeof(*dh) ||
 	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
-		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 		return;
 	}
 
@@ -256,7 +256,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 			       iph->saddr, ntohs(dh->dccph_sport),
 			       inet_iif(skb));
 	if (!sk) {
-		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 		return;
 	}
 
@@ -273,7 +273,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	if (sk->sk_state == DCCP_CLOSED)
 		goto out;
@@ -281,7 +281,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 	dp = dccp_sk(sk);
 	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -318,7 +318,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 	case DCCP_REQUESTING:
 	case DCCP_RESPOND:
 		if (!sock_owned_by_user(sk)) {
-			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+			__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
 			sk->sk_err = err;
 
 			sk->sk_error_report(sk);
@@ -431,11 +431,11 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
 	return newsk;
 
 exit_overflow:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
 	dst_release(dst);
 exit:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return NULL;
 put_and_exit:
 	inet_csk_prepare_forced_close(newsk);
@@ -462,7 +462,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_flow(net, &fl4, sk);
 	if (IS_ERR(rt)) {
-		IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+		__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 		return NULL;
 	}
 
@@ -533,8 +533,8 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 	bh_unlock_sock(ctl_sk);
 
 	if (net_xmit_eval(err) == 0) {
-		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
-		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
+		__DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+		__DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
 	}
 out:
 	dst_release(dst);
@@ -637,7 +637,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 drop_and_free:
 	reqsk_free(req);
 drop:
-	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+	__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
|
@ -80,8 +80,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
|
||||
|
||||
if (skb->len < offset + sizeof(*dh) ||
|
||||
skb->len < offset + __dccp_basic_hdr_len(dh)) {
|
||||
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
|
||||
ICMP6_MIB_INERRORS);
|
||||
__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
|
||||
ICMP6_MIB_INERRORS);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -91,8 +91,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
|
||||
inet6_iif(skb));
|
||||
|
||||
if (!sk) {
|
||||
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
|
||||
ICMP6_MIB_INERRORS);
|
||||
__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
|
||||
ICMP6_MIB_INERRORS);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -106,7 +106,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
|
||||
|
||||
bh_lock_sock(sk);
|
||||
if (sock_owned_by_user(sk))
|
||||
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
|
||||
__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
|
||||
|
||||
if (sk->sk_state == DCCP_CLOSED)
|
||||
goto out;
|
||||
@ -114,7 +114,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
|
||||
dp = dccp_sk(sk);
|
||||
if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
|
||||
!between48(seq, dp->dccps_awl, dp->dccps_awh)) {
|
||||
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
|
||||
__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -156,7 +156,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
|
||||
case DCCP_RESPOND: /* Cannot happen.
|
||||
It can, it SYNs are crossed. --ANK */
|
||||
if (!sock_owned_by_user(sk)) {
|
||||
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
|
||||
__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
|
||||
sk->sk_err = err;
|
||||
/*
|
||||
* Wake people up to see the error
|
||||
@ -277,8 +277,8 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
|
||||
if (!IS_ERR(dst)) {
|
||||
skb_dst_set(skb, dst);
|
||||
ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
|
||||
DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
|
||||
DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
|
||||
__DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
|
||||
__DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -378,7 +378,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
|
||||
drop_and_free:
|
||||
reqsk_free(req);
|
||||
drop:
|
||||
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
|
||||
__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -527,11 +527,11 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
|
||||
return newsk;
|
||||
|
||||
out_overflow:
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
|
||||
out_nonewsk:
|
||||
dst_release(dst);
|
||||
out:
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@@ -127,7 +127,7 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
 		}
 		dccp_init_xmit_timers(newsk);
 
-		DCCP_INC_STATS_BH(DCCP_MIB_PASSIVEOPENS);
+		__DCCP_INC_STATS(DCCP_MIB_PASSIVEOPENS);
 	}
 	return newsk;
 }
@@ -253,7 +253,7 @@ out_nonsensical_length:
 	return 0;
 
 out_invalid_option:
-	DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT);
+	__DCCP_INC_STATS(DCCP_MIB_INVALIDOPT);
 	rc = DCCP_RESET_CODE_OPTION_ERROR;
 out_featneg_failed:
 	DCCP_WARN("DCCP(%p): Option %d (len=%d) error=%u\n", sk, opt, len, rc);
|
@ -28,7 +28,7 @@ static void dccp_write_err(struct sock *sk)
|
||||
|
||||
dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
|
||||
dccp_done(sk);
|
||||
DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
|
||||
__DCCP_INC_STATS(DCCP_MIB_ABORTONTIMEOUT);
|
||||
}
|
||||
|
||||
/* A write timeout has occurred. Process the after effects. */
|
||||
@ -100,7 +100,7 @@ static void dccp_retransmit_timer(struct sock *sk)
|
||||
* total number of retransmissions of clones of original packets.
|
||||
*/
|
||||
if (icsk->icsk_retransmits == 0)
|
||||
DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS);
|
||||
__DCCP_INC_STATS(DCCP_MIB_TIMEOUTS);
|
||||
|
||||
if (dccp_retransmit_skb(sk) != 0) {
|
||||
/*
|
||||
@ -179,7 +179,7 @@ static void dccp_delack_timer(unsigned long data)
|
||||
if (sock_owned_by_user(sk)) {
|
||||
/* Try again later. */
|
||||
icsk->icsk_ack.blocked = 1;
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
|
||||
sk_reset_timer(sk, &icsk->icsk_delack_timer,
|
||||
jiffies + TCP_DELACK_MIN);
|
||||
goto out;
|
||||
@ -209,7 +209,7 @@ static void dccp_delack_timer(unsigned long data)
|
||||
icsk->icsk_ack.ato = TCP_ATO_MIN;
|
||||
}
|
||||
dccp_send_ack(sk);
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
|
||||
}
|
||||
out:
|
||||
bh_unlock_sock(sk);
|
||||
|
@@ -436,7 +436,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
 	if (IS_ERR(rt))
 		return 1;
 	if (rt->dst.dev != dev) {
-		NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
+		__NET_INC_STATS(net, LINUX_MIB_ARPFILTER);
 		flag = 1;
 	}
 	ip_rt_put(rt);
@@ -363,7 +363,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
 			   icmp_param->data_len+icmp_param->head_len,
 			   icmp_param->head_len,
 			   ipc, rt, MSG_DONTWAIT) < 0) {
-		ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS);
+		__ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS);
 		ip_flush_pending_frames(sk);
 	} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
 		struct icmphdr *icmph = icmp_hdr(skb);
@@ -744,7 +744,7 @@ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
 	 * avoid additional coding at protocol handlers.
 	 */
 	if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
-		ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
 		return;
 	}
 
@@ -865,7 +865,7 @@ static bool icmp_unreach(struct sk_buff *skb)
 out:
 	return true;
 out_err:
-	ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+	__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 	return false;
 }
 
@@ -877,7 +877,7 @@ out_err:
 static bool icmp_redirect(struct sk_buff *skb)
 {
 	if (skb->len < sizeof(struct iphdr)) {
-		ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
 		return false;
 	}
 
@@ -956,7 +956,7 @@ static bool icmp_timestamp(struct sk_buff *skb)
 	return true;
 
 out_err:
-	ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
+	__ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
 	return false;
 }
 
@@ -996,7 +996,7 @@ int icmp_rcv(struct sk_buff *skb)
 		skb_set_network_header(skb, nh);
 	}
 
-	ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);
+	__ICMP_INC_STATS(net, ICMP_MIB_INMSGS);
 
 	if (skb_checksum_simple_validate(skb))
 		goto csum_error;
@@ -1006,7 +1006,7 @@ int icmp_rcv(struct sk_buff *skb)
 
 	icmph = icmp_hdr(skb);
 
-	ICMPMSGIN_INC_STATS_BH(net, icmph->type);
+	ICMPMSGIN_INC_STATS(net, icmph->type);
 	/*
 	 *	18 is the highest 'known' ICMP type. Anything else is a mystery
 	 *
@@ -1052,9 +1052,9 @@ drop:
 	kfree_skb(skb);
 	return 0;
csum_error:
-	ICMP_INC_STATS_BH(net, ICMP_MIB_CSUMERRORS);
+	__ICMP_INC_STATS(net, ICMP_MIB_CSUMERRORS);
error:
-	ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+	__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 	goto drop;
 }
@@ -427,7 +427,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
route_err:
 	ip_rt_put(rt);
no_route:
-	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_req);
@@ -466,7 +466,7 @@ route_err:
 	ip_rt_put(rt);
no_route:
 	rcu_read_unlock();
-	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
@@ -360,7 +360,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
 	__sk_nulls_add_node_rcu(sk, &head->chain);
 	if (tw) {
 		sk_nulls_del_node_init_rcu((struct sock *)tw);
-		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
 	}
 	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -147,9 +147,9 @@ static void tw_timer_handler(unsigned long data)
 	struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data;
 
 	if (tw->tw_kill)
-		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
+		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
 	else
-		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
+		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITED);
 	inet_twsk_kill(tw);
 }
 
@@ -65,8 +65,8 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s
 {
 	struct ip_options *opt	= &(IPCB(skb)->opt);
 
-	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
-	IP_ADD_STATS_BH(net, IPSTATS_MIB_OUTOCTETS, skb->len);
+	__IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
+	__IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
 
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
@@ -157,7 +157,7 @@ sr_failed:
 
too_many_hops:
 	/* Tell the sender its packet died... */
-	IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
+	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 	icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
drop:
 	kfree_skb(skb);
@@ -204,14 +204,14 @@ static void ip_expire(unsigned long arg)
 		goto out;
 
 	ipq_kill(qp);
-	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 
 	if (!inet_frag_evicting(&qp->q)) {
 		struct sk_buff *head = qp->q.fragments;
 		const struct iphdr *iph;
 		int err;
 
-		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
+		__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
 
 		if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
 			goto out;
@@ -291,7 +291,7 @@ static int ip_frag_too_far(struct ipq *qp)
 		struct net *net;
 
 		net = container_of(qp->q.net, struct net, ipv4.frags);
-		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+		__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 	}
 
 	return rc;
@@ -635,7 +635,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 
 	ip_send_check(iph);
 
-	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
 	qp->q.fragments = NULL;
 	qp->q.fragments_tail = NULL;
 	return 0;
@@ -647,7 +647,7 @@ out_nomem:
out_oversize:
 	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
-	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 	return err;
 }
 
@@ -658,7 +658,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
 	int vif = l3mdev_master_ifindex_rcu(dev);
 	struct ipq *qp;
 
-	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
 	skb_orphan(skb);
 
 	/* Lookup (or create) queue header */
@@ -675,7 +675,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
 		return ret;
 	}
 
-	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
 	return -ENOMEM;
 }
@@ -218,17 +218,17 @@ static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_b
 				protocol = -ret;
 				goto resubmit;
 			}
-			IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
+			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
 		} else {
 			if (!raw) {
 				if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-					IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
+					__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
 					icmp_send(skb, ICMP_DEST_UNREACH,
 						  ICMP_PROT_UNREACH, 0);
 				}
 				kfree_skb(skb);
 			} else {
-				IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
+				__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
 				consume_skb(skb);
 			}
 		}
@@ -273,7 +273,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
 	   --ANK (980813)
 	*/
 	if (skb_cow(skb, skb_headroom(skb))) {
-		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
+		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 
@@ -282,7 +282,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
 	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
 
 	if (ip_options_compile(dev_net(dev), opt, skb)) {
-		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
+		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
 		goto drop;
 	}
 
@@ -337,7 +337,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 					       iph->tos, skb->dev);
 		if (unlikely(err)) {
 			if (err == -EXDEV)
-				NET_INC_STATS_BH(net, LINUX_MIB_IPRPFILTER);
+				__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
 			goto drop;
 		}
 	}
@@ -358,9 +358,9 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 
 	rt = skb_rtable(skb);
 	if (rt->rt_type == RTN_MULTICAST) {
-		IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_INMCAST, skb->len);
+		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
 	} else if (rt->rt_type == RTN_BROADCAST) {
-		IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_INBCAST, skb->len);
+		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
 	} else if (skb->pkt_type == PACKET_BROADCAST ||
 		   skb->pkt_type == PACKET_MULTICAST) {
 		struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
@@ -409,11 +409,11 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 
 
 	net = dev_net(dev);
-	IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_IN, skb->len);
+	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);
 
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (!skb) {
-		IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
+		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
 		goto out;
 	}
 
@@ -439,9 +439,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
 	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
 	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
-	IP_ADD_STATS_BH(net,
-			IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
-			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
+	__IP_ADD_STATS(net,
+		       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
+		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
 
 	if (!pskb_may_pull(skb, iph->ihl*4))
 		goto inhdr_error;
@@ -453,7 +453,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 
 	len = ntohs(iph->tot_len);
 	if (skb->len < len) {
-		IP_INC_STATS_BH(net, IPSTATS_MIB_INTRUNCATEDPKTS);
+		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
 		goto drop;
 	} else if (len < (iph->ihl*4))
 		goto inhdr_error;
@@ -463,7 +463,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 	 * Note this now means skb->len holds ntohs(iph->tot_len).
 	 */
 	if (pskb_trim_rcsum(skb, len)) {
-		IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
+		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 
@@ -480,9 +480,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 		       ip_rcv_finish);
 
csum_error:
-	IP_INC_STATS_BH(net, IPSTATS_MIB_CSUMERRORS);
+	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
-	IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
+	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
 	kfree_skb(skb);
out:
@@ -915,11 +915,11 @@ static int ip_error(struct sk_buff *skb)
 	if (!IN_DEV_FORWARD(in_dev)) {
 		switch (rt->dst.error) {
 		case EHOSTUNREACH:
-			IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
+			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
 			break;
 
 		case ENETUNREACH:
-			IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
+			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
 			break;
 		}
 		goto out;
@@ -934,7 +934,7 @@ static int ip_error(struct sk_buff *skb)
 		break;
 	case ENETUNREACH:
 		code = ICMP_NET_UNREACH;
-		IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
+		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
 		break;
 	case EACCES:
 		code = ICMP_PKT_FILTERED;
@@ -312,11 +312,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 
 	mss = __cookie_v4_check(ip_hdr(skb), th, cookie);
 	if (mss == 0) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 		goto out;
 	}
 
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 
 	/* check for timestamp cookie support */
 	memset(&tcp_opt, 0, sizeof(tcp_opt));
@@ -1443,7 +1443,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
 
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1777,7 +1777,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 
 			chunk = len - tp->ucopy.len;
 			if (chunk != 0) {
-				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+				NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -1789,7 +1789,7 @@ do_prequeue:
 
 				chunk = len - tp->ucopy.len;
 				if (chunk != 0) {
-					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+					NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 					len -= chunk;
 					copied += chunk;
 				}
@@ -1875,7 +1875,7 @@ skip_copy:
 		tcp_prequeue_process(sk);
 
 		if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-			NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+			NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 			len -= chunk;
 			copied += chunk;
 		}
@@ -2065,13 +2065,13 @@ void tcp_close(struct sock *sk, long timeout)
 		sk->sk_prot->disconnect(sk, 0);
 	} else if (data_was_unread) {
 		/* Unread data was tossed, zap the connection. */
-		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 		tcp_set_state(sk, TCP_CLOSE);
 		tcp_send_active_reset(sk, sk->sk_allocation);
 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
-		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 	} else if (tcp_close_state(sk)) {
 		/* We FIN if the application ate all the data before
 		 * zapping the connection.
@@ -2148,7 +2148,7 @@ adjudge_to_death:
 	if (tp->linger2 < 0) {
 		tcp_set_state(sk, TCP_CLOSE);
 		tcp_send_active_reset(sk, GFP_ATOMIC);
-		NET_INC_STATS_BH(sock_net(sk),
+		__NET_INC_STATS(sock_net(sk),
 				LINUX_MIB_TCPABORTONLINGER);
 	} else {
 		const int tmo = tcp_fin_time(sk);
@@ -2167,7 +2167,7 @@ adjudge_to_death:
 		if (tcp_check_oom(sk, 0)) {
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPABORTONMEMORY);
 		}
 	}
@@ -3091,7 +3091,7 @@ void tcp_done(struct sock *sk)
 	struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
 
 	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
-		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 
 	tcp_set_state(sk, TCP_CLOSE);
 	tcp_clear_xmit_timers(sk);
@@ -155,11 +155,11 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 
 			ca->last_ack = now_us;
 			if (after(now_us, ca->round_start + base_owd)) {
-				NET_INC_STATS_BH(sock_net(sk),
-						 LINUX_MIB_TCPHYSTARTTRAINDETECT);
-				NET_ADD_STATS_BH(sock_net(sk),
-						 LINUX_MIB_TCPHYSTARTTRAINCWND,
-						 tp->snd_cwnd);
+				__NET_INC_STATS(sock_net(sk),
+						LINUX_MIB_TCPHYSTARTTRAINDETECT);
+				__NET_ADD_STATS(sock_net(sk),
+						LINUX_MIB_TCPHYSTARTTRAINCWND,
+						tp->snd_cwnd);
 				tp->snd_ssthresh = tp->snd_cwnd;
 				return;
 			}
@@ -174,11 +174,11 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 				       125U);
 
 		if (ca->rtt.min > thresh) {
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPHYSTARTDELAYDETECT);
-			NET_ADD_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPHYSTARTDELAYCWND,
-					 tp->snd_cwnd);
+			__NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_TCPHYSTARTDELAYDETECT);
+			__NET_ADD_STATS(sock_net(sk),
+					LINUX_MIB_TCPHYSTARTDELAYCWND,
+					tp->snd_cwnd);
 			tp->snd_ssthresh = tp->snd_cwnd;
 		}
 	}
@@ -402,11 +402,11 @@ static void hystart_update(struct sock *sk, u32 delay)
 		ca->last_ack = now;
 		if ((s32)(now - ca->round_start) > ca->delay_min >> 4) {
 			ca->found |= HYSTART_ACK_TRAIN;
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPHYSTARTTRAINDETECT);
-			NET_ADD_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPHYSTARTTRAINCWND,
-					 tp->snd_cwnd);
+			__NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_TCPHYSTARTTRAINDETECT);
+			__NET_ADD_STATS(sock_net(sk),
+					LINUX_MIB_TCPHYSTARTTRAINCWND,
+					tp->snd_cwnd);
 			tp->snd_ssthresh = tp->snd_cwnd;
 		}
 	}
@@ -423,11 +423,11 @@ static void hystart_update(struct sock *sk, u32 delay)
 		if (ca->curr_rtt > ca->delay_min +
 		    HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
 			ca->found |= HYSTART_DELAY;
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPHYSTARTDELAYDETECT);
-			NET_ADD_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPHYSTARTDELAYCWND,
-					 tp->snd_cwnd);
+			__NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_TCPHYSTARTDELAYDETECT);
+			__NET_ADD_STATS(sock_net(sk),
+					LINUX_MIB_TCPHYSTARTDELAYCWND,
+					tp->snd_cwnd);
 			tp->snd_ssthresh = tp->snd_cwnd;
 		}
 	}
@@ -256,8 +256,8 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
 		req1 = fastopenq->rskq_rst_head;
 		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
 			spin_unlock(&fastopenq->lock);
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
+			__NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
 			return false;
 		}
 		fastopenq->rskq_rst_head = req1->dl_next;
@@ -282,7 +282,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 	struct sock *child;
 
 	if (foc->len == 0) /* Client requests a cookie */
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
 
 	if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
 	      (syn_data || foc->len >= 0) &&
@@ -311,13 +311,13 @@ fastopen:
 		child = tcp_fastopen_create_child(sk, skb, dst, req);
 		if (child) {
 			foc->len = -1;
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPFASTOPENPASSIVE);
+			__NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_TCPFASTOPENPASSIVE);
 			return child;
 		}
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 	} else if (foc->len > 0) /* Client presents an invalid cookie */
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 
 	valid_foc.exp = foc->exp;
 	*foc = valid_foc;
@@ -869,7 +869,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 		else
 			mib_idx = LINUX_MIB_TCPSACKREORDER;
 
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
 		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
 			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1062,7 +1062,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
 		dup_sack = true;
 		tcp_dsack_seen(tp);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
 		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
 		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
@@ -1071,7 +1071,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 		    !before(start_seq_0, start_seq_1)) {
 			dup_sack = true;
 			tcp_dsack_seen(tp);
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPDSACKOFORECV);
 		}
 	}
@@ -1289,7 +1289,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 
 	if (skb->len > 0) {
 		BUG_ON(!tcp_skb_pcount(skb));
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
 		return false;
 	}
 
@@ -1313,7 +1313,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	tcp_unlink_write_queue(skb, sk);
 	sk_wmem_free_skb(sk, skb);
 
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
 
 	return true;
 }
@@ -1469,7 +1469,7 @@ noop:
 	return skb;
 
fallback:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
 	return NULL;
 }
 
@@ -1657,7 +1657,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 				mib_idx = LINUX_MIB_TCPSACKDISCARD;
 			}
 
-			NET_INC_STATS_BH(sock_net(sk), mib_idx);
+			__NET_INC_STATS(sock_net(sk), mib_idx);
 			if (i == 0)
 				first_sack_index = -1;
 			continue;
@@ -1909,7 +1909,7 @@ void tcp_enter_loss(struct sock *sk)
 	skb = tcp_write_queue_head(sk);
 	is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
 	if (is_reneg) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
 	}
@@ -2395,7 +2395,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
 		else
 			mib_idx = LINUX_MIB_TCPFULLUNDO;
 
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 	}
 	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
 		/* Hold old state until something *above* high_seq
@@ -2417,7 +2417,7 @@ static bool tcp_try_undo_dsack(struct sock *sk)
 	if (tp->undo_marker && !tp->undo_retrans) {
 		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwnd_reduction(sk, false);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 		return true;
 	}
 	return false;
@@ -2432,10 +2432,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 		tcp_undo_cwnd_reduction(sk, true);
 
 		DBGUNDO(sk, "partial loss");
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		if (frto_undo)
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPSPURIOUSRTOS);
+			__NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_TCPSPURIOUSRTOS);
 		inet_csk(sk)->icsk_retransmits = 0;
 		if (frto_undo || tcp_is_sack(tp))
 			tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2559,7 +2559,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
 
 	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
 	icsk->icsk_mtup.probe_size = 0;
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
 }
 
 static void tcp_mtup_probe_success(struct sock *sk)
@@ -2579,7 +2579,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
 	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
 	icsk->icsk_mtup.probe_size = 0;
 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
 }
 
 /* Do a simple retransmit without using the backoff mechanisms in
@@ -2643,7 +2643,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 	else
 		mib_idx = LINUX_MIB_TCPSACKRECOVERY;
 
-	NET_INC_STATS_BH(sock_net(sk), mib_idx);
+	__NET_INC_STATS(sock_net(sk), mib_idx);
 
 	tp->prior_ssthresh = 0;
 	tcp_init_undo(tp);
@@ -2736,7 +2736,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked)
 
 		DBGUNDO(sk, "partial recovery");
 		tcp_undo_cwnd_reduction(sk, true);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 		tcp_try_keep_open(sk);
 		return true;
 	}
@@ -3431,7 +3431,7 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
 		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
 
 		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
-			NET_INC_STATS_BH(net, mib_idx);
+			__NET_INC_STATS(net, mib_idx);
 			return true;	/* rate-limited: don't send yet! */
 		}
 	}
@@ -3464,7 +3464,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
 		challenge_count = 0;
 	}
 	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
 		tcp_send_ack(sk);
 	}
 }
@ -3513,8 +3513,8 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
|
||||
tcp_set_ca_state(sk, TCP_CA_CWR);
|
||||
tcp_end_cwnd_reduction(sk);
|
||||
tcp_try_keep_open(sk);
|
||||
NET_INC_STATS_BH(sock_net(sk),
|
||||
LINUX_MIB_TCPLOSSPROBERECOVERY);
|
||||
__NET_INC_STATS(sock_net(sk),
|
||||
LINUX_MIB_TCPLOSSPROBERECOVERY);
|
||||
} else if (!(flag & (FLAG_SND_UNA_ADVANCED |
|
||||
FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
|
||||
/* Pure dupack: original and TLP probe arrived; no loss */
|
||||
@ -3618,14 +3618,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
|
||||
|
||||
tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
|
||||
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
|
||||
} else {
|
||||
u32 ack_ev_flags = CA_ACK_SLOWPATH;
|
||||
|
||||
if (ack_seq != TCP_SKB_CB(skb)->end_seq)
|
||||
flag |= FLAG_DATA;
|
||||
else
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
|
||||
|
||||
flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
|
||||
|
||||
@ -4128,7 +4128,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
|
||||
else
|
||||
mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
|
||||
|
||||
NET_INC_STATS_BH(sock_net(sk), mib_idx);
|
||||
__NET_INC_STATS(sock_net(sk), mib_idx);
|
||||
|
||||
tp->rx_opt.dsack = 1;
|
||||
tp->duplicate_sack[0].start_seq = seq;
|
||||
@ -4152,7 +4152,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
|
||||
|
||||
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
|
||||
before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
|
||||
tcp_enter_quickack_mode(sk);
|
||||
|
||||
if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
|
||||
@ -4302,7 +4302,7 @@ static bool tcp_try_coalesce(struct sock *sk,
|
||||
|
||||
atomic_add(delta, &sk->sk_rmem_alloc);
|
||||
sk_mem_charge(sk, delta);
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
|
||||
TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
|
||||
TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
|
||||
TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
|
||||
@ -4390,7 +4390,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
|
||||
tcp_ecn_check_ce(tp, skb);
|
||||
|
||||
if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
|
||||
tcp_drop(sk, skb);
|
||||
return;
|
||||
}
|
||||
@ -4399,7 +4399,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
|
||||
tp->pred_flags = 0;
|
||||
inet_csk_schedule_ack(sk);
|
||||
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
|
||||
SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
|
||||
tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
|
||||
|
||||
@ -4454,7 +4454,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
|
||||
if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
|
||||
if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
|
||||
/* All the bits are present. Drop. */
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
|
||||
tcp_drop(sk, skb);
|
||||
skb = NULL;
|
||||
tcp_dsack_set(sk, seq, end_seq);
|
||||
@ -4493,7 +4493,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
|
||||
__skb_unlink(skb1, &tp->out_of_order_queue);
|
||||
tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
|
||||
TCP_SKB_CB(skb1)->end_seq);
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
|
||||
tcp_drop(sk, skb1);
|
||||
}
|
||||
|
||||
@ -4658,7 +4658,7 @@ queue_and_out:
|
||||
|
||||
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
|
||||
/* A retransmit, 2nd most common case. Force an immediate ack. */
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
|
||||
tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
|
||||
|
||||
out_of_window:
|
||||
@ -4704,7 +4704,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
|
||||
|
||||
__skb_unlink(skb, list);
|
||||
__kfree_skb(skb);
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
|
||||
|
||||
return next;
|
||||
}
|
||||
@ -4863,7 +4863,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
|
||||
bool res = false;
|
||||
|
||||
if (!skb_queue_empty(&tp->out_of_order_queue)) {
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
|
||||
__skb_queue_purge(&tp->out_of_order_queue);
|
||||
|
||||
/* Reset SACK state. A conforming SACK implementation will
|
||||
@ -4892,7 +4892,7 @@ static int tcp_prune_queue(struct sock *sk)
|
||||
|
||||
SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
|
||||
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
|
||||
|
||||
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
|
||||
tcp_clamp_window(sk);
|
||||
@ -4922,7 +4922,7 @@ static int tcp_prune_queue(struct sock *sk)
|
||||
* drop receive data on the floor. It will get retransmitted
|
||||
* and hopefully then we'll have sufficient space.
|
||||
*/
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
|
||||
|
||||
/* Massive buffer overcommit. */
|
||||
tp->pred_flags = 0;
|
||||
@ -5181,7 +5181,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
|
||||
if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
|
||||
tcp_paws_discard(sk, skb)) {
|
||||
if (!th->rst) {
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
|
||||
if (!tcp_oow_rate_limited(sock_net(sk), skb,
|
||||
LINUX_MIB_TCPACKSKIPPEDPAWS,
|
||||
&tp->last_oow_ack_time))
|
||||
@ -5233,8 +5233,8 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
|
||||
if (th->syn) {
|
||||
syn_challenge:
|
||||
if (syn_inerr)
|
||||
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
|
||||
__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
|
||||
tcp_send_challenge_ack(sk, skb);
|
||||
goto discard;
|
||||
}
|
||||
@ -5349,7 +5349,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
|
||||
tcp_data_snd_check(sk);
|
||||
return;
|
||||
} else { /* Header too small */
|
||||
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
|
||||
__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
|
||||
goto discard;
|
||||
}
|
||||
} else {
|
||||
@ -5377,7 +5377,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
|
||||
|
||||
__skb_pull(skb, tcp_header_len);
|
||||
tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
|
||||
__NET_INC_STATS(sock_net(sk),
|
||||
LINUX_MIB_TCPHPHITSTOUSER);
|
||||
eaten = 1;
|
||||
}
|
||||
}
|
||||
@ -5399,7 +5400,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
|
||||
|
||||
tcp_rcv_rtt_measure_ts(sk, skb);
|
||||
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
|
||||
|
||||
/* Bulk data transfer: receiver */
|
||||
eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
|
||||
@ -5456,8 +5457,8 @@ step5:
|
||||
return;
|
||||
|
||||
csum_error:
|
||||
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
|
||||
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
|
||||
__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
|
||||
__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
|
||||
|
||||
discard:
|
||||
tcp_drop(sk, skb);
|
||||
@ -5549,12 +5550,14 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
|
||||
break;
|
||||
}
|
||||
tcp_rearm_rto(sk);
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
|
||||
__NET_INC_STATS(sock_net(sk),
|
||||
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
|
||||
return true;
|
||||
}
|
||||
tp->syn_data_acked = tp->syn_data;
|
||||
if (tp->syn_data_acked)
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
|
||||
__NET_INC_STATS(sock_net(sk),
|
||||
LINUX_MIB_TCPFASTOPENACTIVE);
|
||||
|
||||
tcp_fastopen_add_skb(sk, synack);
|
||||
|
||||
@ -5589,7 +5592,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
|
||||
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
|
||||
!between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
|
||||
tcp_time_stamp)) {
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
|
||||
__NET_INC_STATS(sock_net(sk),
|
||||
LINUX_MIB_PAWSACTIVEREJECTED);
|
||||
goto reset_and_undo;
|
||||
}
|
||||
|
||||
@ -5958,7 +5962,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
|
||||
(TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
|
||||
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
|
||||
tcp_done(sk);
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -6015,7 +6019,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
|
||||
if (sk->sk_shutdown & RCV_SHUTDOWN) {
|
||||
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
|
||||
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
|
||||
tcp_reset(sk);
|
||||
return 1;
|
||||
}
|
||||
@ -6153,10 +6157,10 @@ static bool tcp_syn_flood_action(const struct sock *sk,
|
||||
if (net->ipv4.sysctl_tcp_syncookies) {
|
||||
msg = "Sending cookies";
|
||||
want_cookie = true;
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
|
||||
} else
|
||||
#endif
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
|
||||
|
||||
if (!queue->synflood_warned &&
|
||||
net->ipv4.sysctl_tcp_syncookies != 2 &&
|
||||
@ -6217,7 +6221,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
|
||||
* timeout.
|
||||
*/
|
||||
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
|
||||
goto drop;
|
||||
}
|
||||
|
||||
@ -6264,7 +6268,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
|
||||
if (dst && strict &&
|
||||
!tcp_peer_is_proven(req, dst, true,
|
||||
tmp_opt.saw_tstamp)) {
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
|
||||
__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
|
||||
goto drop_and_release;
|
||||
}
|
||||
}
|
||||
|
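The conversions above are purely mechanical: call sites that always run with bottom halves disabled drop the _BH suffix and gain a __ prefix, the same convention the kernel uses for plain per-cpu operations (this_cpu_inc() vs __this_cpu_inc()). A minimal userspace model of the two flavors, for orientation only (illustrative names and a single shared slot; not the kernel's actual snmp.h, which updates per-cpu MIB arrays):

#include <stdatomic.h>
#include <stdio.h>

enum { LINUX_MIB_TCPHPHITS, MIB_MAX };

/* Stand-in for one CPU's slot of a per-cpu MIB array. */
static _Atomic unsigned long mib[MIB_MAX];

/* Safe in any context: a single atomic read-modify-write,
 * analogous to this_cpu_inc(). */
static void net_inc_stats(int field)
{
	atomic_fetch_add_explicit(&mib[field], 1, memory_order_relaxed);
}

/* Cheaper non-atomic read-modify-write, analogous to __this_cpu_inc():
 * only correct when the caller already excludes concurrent updaters
 * (in the kernel: BH/preemption disabled on this CPU). */
static void __net_inc_stats(int field)
{
	unsigned long v = atomic_load_explicit(&mib[field], memory_order_relaxed);

	atomic_store_explicit(&mib[field], v + 1, memory_order_relaxed);
}

int main(void)
{
	net_inc_stats(LINUX_MIB_TCPHPHITS);	/* e.g. process context */
	__net_inc_stats(LINUX_MIB_TCPHPHITS);	/* e.g. softirq context */
	printf("TCPHPHits = %lu\n",
	       atomic_load_explicit(&mib[LINUX_MIB_TCPHPHITS],
				    memory_order_relaxed));
	return 0;
}
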
@ -320,7 +320,7 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
* an established socket here.
*/
if (seq != tcp_rsk(req)->snt_isn) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
} else if (abort) {
/*
* Still in SYN_RECV, just remove it silently.
@ -372,7 +372,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
th->dest, iph->saddr, ntohs(th->source),
inet_iif(icmp_skb));
if (!sk) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
return;
}
if (sk->sk_state == TCP_TIME_WAIT) {
@ -396,13 +396,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
*/
if (sock_owned_by_user(sk)) {
if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
}
if (sk->sk_state == TCP_CLOSE)
goto out;

if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
goto out;
}

@ -413,7 +413,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
if (sk->sk_state != TCP_LISTEN &&
!between(seq, snd_una, tp->snd_nxt)) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}

@ -697,8 +697,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
&arg, arg.iov[0].iov_len);

TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
out:
@ -779,7 +779,7 @@ static void tcp_v4_send_ack(struct net *net,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
&arg, arg.iov[0].iov_len);

TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
@ -1151,12 +1151,12 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
return false;

if (hash_expected && !hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
return true;
}

if (!hash_expected && hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
return true;
}

@ -1342,7 +1342,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
return newsk;

exit_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
dst_release(dst);
exit:
@ -1432,8 +1432,8 @@ discard:
return 0;

csum_err:
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
@ -1513,8 +1513,8 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)

while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
sk_backlog_rcv(sk, skb1);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPPREQUEUEDROPPED);
__NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPPREQUEUEDROPPED);
}

tp->ucopy.memory = 0;
@ -1547,7 +1547,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
goto discard_it;

/* Count it even if it's bad */
TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
__TCP_INC_STATS(net, TCP_MIB_INSEGS);

if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;
@ -1629,7 +1629,7 @@ process:
}
}
if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
goto discard_and_relse;
}

@ -1662,7 +1662,7 @@ process:
} else if (unlikely(sk_add_backlog(sk, skb,
sk->sk_rcvbuf + sk->sk_sndbuf))) {
bh_unlock_sock(sk);
NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
goto discard_and_relse;
}
bh_unlock_sock(sk);
@ -1679,9 +1679,9 @@ no_tcp_socket:

if (tcp_checksum_complete(skb)) {
csum_error:
TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
__TCP_INC_STATS(net, TCP_MIB_INERRS);
} else {
tcp_v4_send_reset(NULL, skb);
}

@ -235,7 +235,7 @@ kill:
}

if (paws_reject)
NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

if (!th->rst) {
/* In this case we must reset the TIMEWAIT timer.
@ -337,7 +337,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
* socket up. We've got bigger problems than
* non-graceful socket closings.
*/
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
}

tcp_update_metrics(sk);
@ -545,7 +545,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->rack.mstamp.v64 = 0;
newtp->rack.advanced = 0;

TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
}
return newsk;
}
@ -710,7 +710,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
&tcp_rsk(req)->last_oow_ack_time))
req->rsk_ops->send_ack(sk, skb, req);
if (paws_reject)
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
return NULL;
}

@ -729,7 +729,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
* "fourth, check the SYN bit"
*/
if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
goto embryonic_reset;
}

@ -752,7 +752,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
inet_rsk(req)->acked = 1;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
return NULL;
}

@ -791,7 +791,7 @@ embryonic_reset:
}
if (!fastopen) {
inet_csk_reqsk_queue_drop(sk, req);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
}
return NULL;
}

@ -2212,8 +2212,8 @@ static bool skb_still_in_host_queue(const struct sock *sk,
const struct sk_buff *skb)
{
if (unlikely(skb_fclone_busy(sk, skb))) {
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
__NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
return true;
}
return false;
@ -2275,7 +2275,7 @@ void tcp_send_loss_probe(struct sock *sk)
tp->tlp_high_seq = tp->snd_nxt;

probe_sent:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
/* Reset s.t. tcp_rearm_rto will restart timer from now */
inet_csk(sk)->icsk_pending = 0;
rearm_timer:
@ -2656,7 +2656,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
/* Update global TCP statistics. */
TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
tp->total_retrans += segs;
}
return err;
@ -2681,7 +2681,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
tp->retrans_stamp = tcp_skb_timestamp(skb);

} else if (err != -EBUSY) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
}

if (tp->undo_retrans < 0)
@ -2805,7 +2805,7 @@ begin_fwd:
if (tcp_retransmit_skb(sk, skb, segs))
return;

NET_INC_STATS_BH(sock_net(sk), mib_idx);
__NET_INC_STATS(sock_net(sk), mib_idx);

if (tcp_in_cwnd_reduction(sk))
tp->prr_out += tcp_skb_pcount(skb);
@ -3042,7 +3042,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
th->window = htons(min(req->rsk_rcv_wnd, 65535U));
tcp_options_write((__be32 *)(th + 1), NULL, &opts);
th->doff = (tcp_header_size >> 2);
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);

#ifdef CONFIG_TCP_MD5SIG
/* Okay, we have all we need - do the md5 hash if needed */
@ -3540,8 +3540,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
tcp_rsk(req)->txhash = net_tx_rndhash();
res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
if (!res) {
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
}
return res;
}

@ -65,8 +65,8 @@ int tcp_rack_mark_lost(struct sock *sk)
if (scb->sacked & TCPCB_SACKED_RETRANS) {
scb->sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= tcp_skb_pcount(skb);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPLOSTRETRANSMIT);
__NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPLOSTRETRANSMIT);
}
} else if (!(scb->sacked & TCPCB_RETRANS)) {
/* Original data are sent sequentially so stop early

@ -30,7 +30,7 @@ static void tcp_write_err(struct sock *sk)
sk->sk_error_report(sk);

tcp_done(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/* Do not allow orphaned sockets to eat all our resources.
@ -68,7 +68,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
if (do_reset)
tcp_send_active_reset(sk, GFP_ATOMIC);
tcp_done(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
return 1;
}
return 0;
@ -162,8 +162,8 @@ static int tcp_write_timeout(struct sock *sk)
if (tp->syn_fastopen || tp->syn_data)
tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
if (tp->syn_data && icsk->icsk_retransmits == 1)
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
__NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
}
retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
syn_set = true;
@ -178,8 +178,8 @@ static int tcp_write_timeout(struct sock *sk)
tp->bytes_acked <= tp->rx_opt.mss_clamp) {
tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
__NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
}
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
@ -228,7 +228,7 @@ void tcp_delack_timer_handler(struct sock *sk)
if (!skb_queue_empty(&tp->ucopy.prequeue)) {
struct sk_buff *skb;

NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
sk_backlog_rcv(sk, skb);
@ -248,7 +248,7 @@ void tcp_delack_timer_handler(struct sock *sk)
icsk->icsk_ack.ato = TCP_ATO_MIN;
}
tcp_send_ack(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
}

out:
@ -265,7 +265,7 @@ static void tcp_delack_timer(unsigned long data)
tcp_delack_timer_handler(sk);
} else {
inet_csk(sk)->icsk_ack.blocked = 1;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
/* deleguate our work to tcp_release_cb() */
if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
sock_hold(sk);
@ -431,7 +431,7 @@ void tcp_retransmit_timer(struct sock *sk)
} else {
mib_idx = LINUX_MIB_TCPTIMEOUTS;
}
NET_INC_STATS_BH(sock_net(sk), mib_idx);
__NET_INC_STATS(sock_net(sk), mib_idx);
}

tcp_enter_loss(sk);
@ -549,7 +549,7 @@ void tcp_syn_ack_timeout(const struct request_sock *req)
{
struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

@ -688,7 +688,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
iph->saddr, uh->source, skb->dev->ifindex, udptable,
NULL);
if (!sk) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
return; /* No socket for error */
}

@ -882,13 +882,13 @@ send:
err = ip_send_skb(sock_net(sk), skb);
if (err) {
if (err == -ENOBUFS && !inet->recverr) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
UDP_INC_STATS(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
err = 0;
}
} else
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
UDP_INC_STATS(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
return err;
}

@ -1157,8 +1157,8 @@ out:
* seems like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
UDP_INC_STATS(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;

@ -1242,10 +1242,10 @@ static unsigned int first_packet_length(struct sock *sk)
spin_lock_bh(&rcvq->lock);
while ((skb = skb_peek(rcvq)) != NULL &&
udp_lib_checksum_complete(skb)) {
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS,
IS_UDPLITE(sk));
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
IS_UDPLITE(sk));
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
atomic_inc(&sk->sk_drops);
__skb_unlink(skb, rcvq);
__skb_queue_tail(&list_kill, skb);
@ -1352,16 +1352,16 @@ try_again:
trace_kfree_skb(skb, udp_recvmsg);
if (!peeked) {
atomic_inc(&sk->sk_drops);
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
UDP_INC_STATS(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
skb_free_datagram_locked(sk, skb);
return err;
}

if (!peeked)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
UDP_INC_STATS(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);

sock_recv_ts_and_drops(msg, sk, skb);

@ -1386,8 +1386,8 @@ try_again:
csum_copy_err:
slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
}
unlock_sock_fast(sk, slow);

@ -1514,9 +1514,9 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)

/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
is_udplite);
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
__UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
is_udplite);
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
kfree_skb(skb);
trace_udp_fail_queue_rcv_skb(rc, sk);
return -1;
@ -1580,9 +1580,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)

ret = encap_rcv(sk, skb);
if (ret <= 0) {
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
is_udplite);
__UDP_INC_STATS(sock_net(sk),
UDP_MIB_INDATAGRAMS,
is_udplite);
return -ret;
}
}
@ -1633,8 +1633,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)

udp_csum_pull_header(skb);
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
is_udplite);
__UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
is_udplite);
goto drop;
}

@ -1653,9 +1653,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return rc;

csum_error:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return -1;
@ -1715,10 +1715,10 @@ start_lookup:

if (unlikely(!nskb)) {
atomic_inc(&sk->sk_drops);
UDP_INC_STATS_BH(net, UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
UDP_INC_STATS_BH(net, UDP_MIB_INERRORS,
IS_UDPLITE(sk));
__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
__UDP_INC_STATS(net, UDP_MIB_INERRORS,
IS_UDPLITE(sk));
continue;
}
if (udp_queue_rcv_skb(sk, nskb) > 0)
@ -1736,8 +1736,8 @@ start_lookup:
consume_skb(skb);
} else {
kfree_skb(skb);
UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
proto == IPPROTO_UDPLITE);
__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
proto == IPPROTO_UDPLITE);
}
return 0;
}
@ -1851,7 +1851,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (udp_lib_checksum_complete(skb))
goto csum_error;

UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

/*
@ -1878,9 +1878,9 @@ csum_error:
proto == IPPROTO_UDPLITE ? "Lite" : "",
&saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
ulen);
UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
kfree_skb(skb);
return 0;
}

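The udp.c hunks also retire the _USER suffix: paths that run in process context (sendmsg, recvmsg) now use the plain UDP_INC_STATS(), while the softirq receive paths take __UDP_INC_STATS(). Either way the counters land in the Udp/UdpLite MIBs exported through /proc/net/snmp; a small sketch for eyeballing them (assumes a Linux /proc, otherwise plain standard C):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[1024];
	FILE *f = fopen("/proc/net/snmp", "r");

	if (!f) {
		perror("/proc/net/snmp");
		return 1;
	}
	/* Two matching lines print per protocol: the header row of
	 * counter names (InDatagrams, InErrors, ...) and the values. */
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Udp:", 4))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
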
@ -258,8 +258,8 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
((skb_transport_header(skb)[1] + 1) << 3)))) {
IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -1;
}
@ -280,8 +280,8 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
return 1;
}

IP6_INC_STATS_BH(dev_net(dst->dev),
ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(dev_net(dst->dev),
ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
return -1;
}

@ -309,8 +309,8 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
((skb_transport_header(skb)[1] + 1) << 3)))) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -1;
}
@ -319,8 +319,8 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)

if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
skb->pkt_type != PACKET_HOST) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);
return -1;
}
@ -334,8 +334,8 @@ looped_back:
* processed by own
*/
if (!addr) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);
return -1;
}
@ -360,8 +360,8 @@ looped_back:
goto unknown_rh;
/* Silently discard invalid RTH type 2 */
if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -1;
}
@ -379,8 +379,8 @@ looped_back:
n = hdr->hdrlen >> 1;

if (hdr->segments_left > n) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
((&hdr->segments_left) -
skb_network_header(skb)));
@ -393,8 +393,8 @@ looped_back:
if (skb_cloned(skb)) {
/* the copy is a forwarded packet */
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTDISCARDS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return -1;
}
@ -416,14 +416,14 @@ looped_back:
if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
(xfrm_address_t *)&ipv6_hdr(skb)->saddr,
IPPROTO_ROUTING) < 0) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);
return -1;
}
if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);
return -1;
}
@ -434,8 +434,8 @@ looped_back:
}

if (ipv6_addr_is_multicast(addr)) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);
return -1;
}
@ -454,8 +454,8 @@ looped_back:

if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
if (ipv6_hdr(skb)->hop_limit <= 1) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
0);
kfree_skb(skb);
@ -470,7 +470,7 @@ looped_back:
return -1;

unknown_rh:
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
(&hdr->type) - skb_network_header(skb));
return -1;
@ -568,28 +568,28 @@ static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
nh[optoff+1]);
IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
goto drop;
}

pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
if (pkt_len <= IPV6_MAXPLEN) {
IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
return false;
}
if (ipv6_hdr(skb)->payload_len) {
IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
return false;
}

if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INTRUNCATEDPKTS);
__IP6_INC_STATS(net, ipv6_skb_idev(skb),
IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
}

@ -622,7 +622,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
np->dontfrag, &sockc_unused);

if (err) {
ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
__ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
} else {
err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
@ -674,7 +674,7 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
return;

out:
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
}

/*
@ -710,7 +710,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
skb_set_network_header(skb, nh);
}

ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS);
__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INMSGS);

saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
@ -728,7 +728,7 @@ static int icmpv6_rcv(struct sk_buff *skb)

type = hdr->icmp6_type;

ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type);
ICMP6MSGIN_INC_STATS(dev_net(dev), idev, type);

switch (type) {
case ICMPV6_ECHO_REQUEST:
@ -812,9 +812,9 @@ static int icmpv6_rcv(struct sk_buff *skb)
return 0;

csum_error:
ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
discard_it:
ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS);
__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INERRORS);
drop_no_count:
kfree_skb(skb);
return 0;

@ -222,7 +222,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
__sk_nulls_add_node_rcu(sk, &head->chain);
if (tw) {
sk_nulls_del_node_init_rcu((struct sock *)tw);
NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
}
spin_unlock(lock);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

@ -78,11 +78,11 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt

idev = __in6_dev_get(skb->dev);

IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_IN, skb->len);
__IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);

if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
!idev || unlikely(idev->cnf.disable_ipv6)) {
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
goto drop;
}

@ -109,10 +109,10 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
if (hdr->version != 6)
goto err;

IP6_ADD_STATS_BH(net, idev,
IPSTATS_MIB_NOECTPKTS +
__IP6_ADD_STATS(net, idev,
IPSTATS_MIB_NOECTPKTS +
(ipv6_get_dsfield(hdr) & INET_ECN_MASK),
max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
/*
* RFC4291 2.5.3
* A packet received on an interface with a destination address
@ -169,12 +169,12 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
/* pkt_len may be zero if Jumbo payload option is present */
if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
IP6_INC_STATS_BH(net,
idev, IPSTATS_MIB_INTRUNCATEDPKTS);
__IP6_INC_STATS(net,
idev, IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
}
if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
goto drop;
}
hdr = ipv6_hdr(skb);
@ -182,7 +182,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt

if (hdr->nexthdr == NEXTHDR_HOP) {
if (ipv6_parse_hopopts(skb) < 0) {
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
rcu_read_unlock();
return NET_RX_DROP;
}
@ -197,7 +197,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
net, NULL, skb, dev, NULL,
ip6_rcv_finish);
err:
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
drop:
rcu_read_unlock();
kfree_skb(skb);
@ -259,18 +259,18 @@ resubmit:
if (ret > 0)
goto resubmit;
else if (ret == 0)
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
} else {
if (!raw) {
if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
IP6_INC_STATS_BH(net, idev,
IPSTATS_MIB_INUNKNOWNPROTOS);
__IP6_INC_STATS(net, idev,
IPSTATS_MIB_INUNKNOWNPROTOS);
icmpv6_send(skb, ICMPV6_PARAMPROB,
ICMPV6_UNK_NEXTHDR, nhoff);
}
kfree_skb(skb);
} else {
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
consume_skb(skb);
}
}
@ -278,7 +278,7 @@ resubmit:
return 0;

discard:
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
rcu_read_unlock();
kfree_skb(skb);
return 0;
@ -297,7 +297,7 @@ int ip6_mc_input(struct sk_buff *skb)
const struct ipv6hdr *hdr;
bool deliver;

IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
__IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST,
skb->len);

@ -395,8 +395,8 @@ int ip6_forward(struct sk_buff *skb)
goto drop;

if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
goto drop;
}

@ -427,8 +427,8 @@ int ip6_forward(struct sk_buff *skb)
/* Force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_INHDRERRORS);

kfree_skb(skb);
return -ETIMEDOUT;
@ -441,15 +441,15 @@ int ip6_forward(struct sk_buff *skb)
if (proxied > 0)
return ip6_input(skb);
else if (proxied < 0) {
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
goto drop;
}
}

if (!xfrm6_route_forward(skb)) {
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
goto drop;
}
dst = skb_dst(skb);
@ -505,17 +505,17 @@ int ip6_forward(struct sk_buff *skb)
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_INTOOBIGERRORS);
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_FRAGFAILS);
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_INTOOBIGERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
}

if (skb_cow(skb, dst->dev->hard_header_len)) {
IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
IPSTATS_MIB_OUTDISCARDS);
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_OUTDISCARDS);
goto drop;
}

@ -525,14 +525,14 @@ int ip6_forward(struct sk_buff *skb)

hdr->hop_limit--;

IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
net, NULL, skb, skb->dev, dst->dev,
ip6_forward_finish);

error:
IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
kfree_skb(skb);
return -EINVAL;

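ip6_forward() also shows the _ADD_ flavor, which bumps a counter by an arbitrary amount (skb->len for the octet count) instead of by one. A toy model of the INC/ADD pair (hypothetical helper names; the kernel versions take net and idev arguments and update per-cpu 64-bit MIBs):

#include <stdio.h>

enum { IPSTATS_MIB_OUTFORWDATAGRAMS, IPSTATS_MIB_OUTOCTETS, IP_MIB_MAX };

static unsigned long ipmib[IP_MIB_MAX];

/* Both assume the caller already runs with BH disabled, as on the
 * forwarding path, hence the __ prefix. */
static void __ip6_inc_stats(int field)                  { ipmib[field]++;    }
static void __ip6_add_stats(int field, unsigned long v) { ipmib[field] += v; }

int main(void)
{
	unsigned long skb_len = 1280;	/* pretend forwarded packet size */

	/* Mirrors the pair of calls in the ip6_forward() hunk:
	 * one datagram forwarded, skb->len octets accounted. */
	__ip6_inc_stats(IPSTATS_MIB_OUTFORWDATAGRAMS);
	__ip6_add_stats(IPSTATS_MIB_OUTOCTETS, skb_len);

	printf("OutForwDatagrams=%lu OutOctets=%lu\n",
	       ipmib[IPSTATS_MIB_OUTFORWDATAGRAMS],
	       ipmib[IPSTATS_MIB_OUTOCTETS]);
	return 0;
}
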
@ -1984,10 +1984,10 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)

static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTFORWDATAGRAMS);
IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTOCTETS, skb->len);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTFORWDATAGRAMS);
__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTOCTETS, skb->len);
return dst_output(net, sk, skb);
}

@ -145,12 +145,12 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
if (!dev)
goto out_rcu_unlock;

IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

if (inet_frag_evicting(&fq->q))
goto out_rcu_unlock;

IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);

/* Don't send error if the first segment did not arrive. */
if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
@ -223,8 +223,8 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

if ((unsigned int)end > IPV6_MAXPLEN) {
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
((u8 *)&fhdr->frag_off -
skb_network_header(skb)));
@ -258,8 +258,8 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
/* RFC2460 says always send parameter problem in
* this case. -DaveM
*/
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
offsetof(struct ipv6hdr, payload_len));
return -1;
@ -361,8 +361,8 @@ found:
discard_fq:
inet_frag_kill(&fq->q, &ip6_frags);
err:
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
return -1;
}
@ -500,7 +500,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
skb_network_header_len(head));

rcu_read_lock();
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
rcu_read_unlock();
fq->q.fragments = NULL;
fq->q.fragments_tail = NULL;
@ -513,7 +513,7 @@ out_oom:
net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
rcu_read_lock();
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
rcu_read_unlock();
return -1;
}
@ -528,7 +528,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
goto fail_hdr;

IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

/* Jumbo payload inhibits frag. header */
if (hdr->payload_len == 0)
@ -544,8 +544,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
if (!(fhdr->frag_off & htons(0xFFF9))) {
/* It is not a fragmented frame */
skb->transport_header += sizeof(struct frag_hdr);
IP6_INC_STATS_BH(net,
ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
__IP6_INC_STATS(net,
ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
@ -566,13 +566,13 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
return ret;
}

IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
return -1;

fail_hdr:
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
return -1;
}

@ -155,11 +155,11 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)

mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);
if (mss == 0) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
goto out;
}

NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);

/* check for timestamp cookie support */
memset(&tcp_opt, 0, sizeof(tcp_opt));

@ -336,8 +336,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb->dev->ifindex);

if (!sk) {
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
}

@ -352,13 +352,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,

bh_lock_sock(sk);
if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

if (sk->sk_state == TCP_CLOSE)
goto out;

if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
goto out;
}

@ -368,7 +368,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
if (sk->sk_state != TCP_LISTEN &&
!between(seq, snd_una, tp->snd_nxt)) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}

@ -649,12 +649,12 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
return false;

if (hash_expected && !hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
return true;
}

if (!hash_expected && hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
return true;
}

@ -825,9 +825,9 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
if (!IS_ERR(dst)) {
skb_dst_set(buff, dst);
ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
if (rst)
TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
return;
}

@ -1165,7 +1165,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
return newsk;

out_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
dst_release(dst);
out:
@ -1276,8 +1276,8 @@ discard:
kfree_skb(skb);
return 0;
csum_err:
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
goto discard;

@ -1359,7 +1359,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
/*
* Count it even if it's bad.
*/
TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
__TCP_INC_STATS(net, TCP_MIB_INSEGS);

if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;
@ -1421,7 +1421,7 @@ process:
}
}
if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
goto discard_and_relse;
}

@ -1454,7 +1454,7 @@ process:
} else if (unlikely(sk_add_backlog(sk, skb,
sk->sk_rcvbuf + sk->sk_sndbuf))) {
bh_unlock_sock(sk);
NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
goto discard_and_relse;
}
bh_unlock_sock(sk);
@ -1472,9 +1472,9 @@ no_tcp_socket:

if (tcp_checksum_complete(skb)) {
csum_error:
TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
__TCP_INC_STATS(net, TCP_MIB_INERRS);
} else {
tcp_v6_send_reset(NULL, skb);
}

@@ -423,24 +423,22 @@ try_again:
 		if (!peeked) {
 			atomic_inc(&sk->sk_drops);
 			if (is_udp4)
-				UDP_INC_STATS_USER(sock_net(sk),
-						   UDP_MIB_INERRORS,
-						   is_udplite);
+				UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+					      is_udplite);
 			else
-				UDP6_INC_STATS_USER(sock_net(sk),
-						    UDP_MIB_INERRORS,
-						    is_udplite);
+				UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+					       is_udplite);
 		}
 		skb_free_datagram_locked(sk, skb);
 		return err;
 	}
 	if (!peeked) {
 		if (is_udp4)
-			UDP_INC_STATS_USER(sock_net(sk),
-					   UDP_MIB_INDATAGRAMS, is_udplite);
+			UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
+				      is_udplite);
 		else
-			UDP6_INC_STATS_USER(sock_net(sk),
-					    UDP_MIB_INDATAGRAMS, is_udplite);
+			UDP6_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
+				       is_udplite);
 	}

 	sock_recv_ts_and_drops(msg, sk, skb);
@@ -487,15 +485,15 @@ csum_copy_err:
 	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags)) {
 		if (is_udp4) {
-			UDP_INC_STATS_USER(sock_net(sk),
-					UDP_MIB_CSUMERRORS, is_udplite);
-			UDP_INC_STATS_USER(sock_net(sk),
-					UDP_MIB_INERRORS, is_udplite);
+			UDP_INC_STATS(sock_net(sk),
+				      UDP_MIB_CSUMERRORS, is_udplite);
+			UDP_INC_STATS(sock_net(sk),
+				      UDP_MIB_INERRORS, is_udplite);
 		} else {
-			UDP6_INC_STATS_USER(sock_net(sk),
-					UDP_MIB_CSUMERRORS, is_udplite);
-			UDP6_INC_STATS_USER(sock_net(sk),
-					UDP_MIB_INERRORS, is_udplite);
+			UDP6_INC_STATS(sock_net(sk),
+				       UDP_MIB_CSUMERRORS, is_udplite);
+			UDP6_INC_STATS(sock_net(sk),
+				       UDP_MIB_INERRORS, is_udplite);
 		}
 	}
 	unlock_sock_fast(sk, slow);
@@ -523,8 +521,8 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
 			       inet6_iif(skb), udptable, skb);
 	if (!sk) {
-		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
-				   ICMP6_MIB_INERRORS);
+		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+				  ICMP6_MIB_INERRORS);
 		return;
 	}

@@ -572,9 +570,9 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)

 		/* Note that an ENOMEM error is charged twice */
 		if (rc == -ENOMEM)
-			UDP6_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_RCVBUFERRORS, is_udplite);
-		UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+			__UDP6_INC_STATS(sock_net(sk),
+					 UDP_MIB_RCVBUFERRORS, is_udplite);
+		__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -630,9 +628,9 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)

 		ret = encap_rcv(sk, skb);
 		if (ret <= 0) {
-			UDP_INC_STATS_BH(sock_net(sk),
-					 UDP_MIB_INDATAGRAMS,
-					 is_udplite);
+			__UDP_INC_STATS(sock_net(sk),
+					UDP_MIB_INDATAGRAMS,
+					is_udplite);
 			return -ret;
 		}
 	}
@@ -666,8 +664,8 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)

 	udp_csum_pull_header(skb);
 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
-		UDP6_INC_STATS_BH(sock_net(sk),
-				  UDP_MIB_RCVBUFERRORS, is_udplite);
+		__UDP6_INC_STATS(sock_net(sk),
+				 UDP_MIB_RCVBUFERRORS, is_udplite);
 		goto drop;
 	}

@@ -686,9 +684,9 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	return rc;

 csum_error:
-	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
+	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
-	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 	atomic_inc(&sk->sk_drops);
 	kfree_skb(skb);
 	return -1;
@@ -771,10 +769,10 @@ start_lookup:
 		nskb = skb_clone(skb, GFP_ATOMIC);
 		if (unlikely(!nskb)) {
 			atomic_inc(&sk->sk_drops);
-			UDP6_INC_STATS_BH(net, UDP_MIB_RCVBUFERRORS,
-					  IS_UDPLITE(sk));
-			UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS,
-					  IS_UDPLITE(sk));
+			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
+					 IS_UDPLITE(sk));
+			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
+					 IS_UDPLITE(sk));
 			continue;
 		}

@@ -793,8 +791,8 @@ start_lookup:
 		consume_skb(skb);
 	} else {
 		kfree_skb(skb);
-		UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
-				  proto == IPPROTO_UDPLITE);
+		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
+				 proto == IPPROTO_UDPLITE);
 	}
 	return 0;
 }
@@ -887,7 +885,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	if (udp_lib_checksum_complete(skb))
 		goto csum_error;

-	UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

 	kfree_skb(skb);
@@ -901,9 +899,9 @@ short_packet:
 			    daddr, ntohs(uh->dest));
 	goto discard;
 csum_error:
-	UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
+	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
 discard:
-	UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
+	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
 	kfree_skb(skb);
 	return 0;
 }
@@ -1015,13 +1013,14 @@ send:
 	err = ip6_send_skb(skb);
 	if (err) {
 		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
-			UDP6_INC_STATS_USER(sock_net(sk),
-					UDP_MIB_SNDBUFERRORS, is_udplite);
+			UDP6_INC_STATS(sock_net(sk),
+				       UDP_MIB_SNDBUFERRORS, is_udplite);
 			err = 0;
 		}
-	} else
-		UDP6_INC_STATS_USER(sock_net(sk),
-				UDP_MIB_OUTDATAGRAMS, is_udplite);
+	} else {
+		UDP6_INC_STATS(sock_net(sk),
+			       UDP_MIB_OUTDATAGRAMS, is_udplite);
+	}
 	return err;
 }

@@ -1342,8 +1341,8 @@ out:
 	 * seems like overkill.
 	 */
 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
-		UDP6_INC_STATS_USER(sock_net(sk),
-				UDP_MIB_SNDBUFERRORS, is_udplite);
+		UDP6_INC_STATS(sock_net(sk),
+			       UDP_MIB_SNDBUFERRORS, is_udplite);
 	}
 	return err;

@@ -698,12 +698,12 @@ void rxrpc_data_ready(struct sock *sk)
 	if (skb_checksum_complete(skb)) {
 		rxrpc_free_skb(skb);
 		rxrpc_put_local(local);
-		UDP_INC_STATS_BH(&init_net, UDP_MIB_INERRORS, 0);
+		__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
 		_leave(" [CSUM failed]");
 		return;
 	}

-	UDP_INC_STATS_BH(&init_net, UDP_MIB_INDATAGRAMS, 0);
+	__UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);

 	/* The socket buffer we have is owned by UDP, with UDP's data all over
 	 * it, but we really want our own data there.
@@ -239,7 +239,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 	offset = 0;

 	if ((whole > 1) || (whole && over))
-		SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
+		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);

 	/* Create chunks for all the full sized DATA chunks. */
 	for (i = 0, len = first_len; i < whole; i++) {
@@ -84,7 +84,7 @@ static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)

 	if (val != cmp) {
 		/* CRC failure, dump it. */
-		SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS);
+		__SCTP_INC_STATS(net, SCTP_MIB_CHECKSUMERRORS);
 		return -1;
 	}
 	return 0;
@@ -122,7 +122,7 @@ int sctp_rcv(struct sk_buff *skb)
 	if (skb->pkt_type != PACKET_HOST)
 		goto discard_it;

-	SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);
+	__SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS);

 	if (skb_linearize(skb))
 		goto discard_it;
@@ -208,7 +208,7 @@ int sctp_rcv(struct sk_buff *skb)
 	 */
 	if (!asoc) {
 		if (sctp_rcv_ootb(skb)) {
-			SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
+			__SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 			goto discard_release;
 		}
 	}
@@ -264,9 +264,9 @@ int sctp_rcv(struct sk_buff *skb)
 			skb = NULL; /* sctp_chunk_free already freed the skb */
 			goto discard_release;
 		}
-		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG);
+		__SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_BACKLOG);
 	} else {
-		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ);
+		__SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_SOFTIRQ);
 		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 	}

@@ -281,7 +281,7 @@ int sctp_rcv(struct sk_buff *skb)
 	return 0;

 discard_it:
-	SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS);
+	__SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
 	kfree_skb(skb);
 	return 0;

@@ -532,7 +532,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

 	*app = asoc;
 	*tpp = transport;
@@ -589,7 +589,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
 	skb->network_header = saveip;
 	skb->transport_header = savesctp;
 	if (!sk) {
-		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 		return;
 	}
 	/* Warning: The sock lock is held. Remember to call
@@ -162,7 +162,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	skb->network_header = saveip;
 	skb->transport_header = savesctp;
 	if (!sk) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_INERRORS);
+		__ICMP6_INC_STATS(net, idev, ICMP6_MIB_INERRORS);
 		goto out;
 	}

@@ -1018,11 +1018,11 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,

 	/* Suck it into the iovec, verify checksum if not done by hw. */
 	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
-		UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
+		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
 		goto out_unlock;
 	}

-	UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
+	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);

 	xprt_adjust_cwnd(xprt, task, copied);
 	xprt_complete_rqst(task, copied);
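The process-context side of the series works the same way in reverse: sendmsg/recvmsg paths that used the _USER variants above now call the plain macros, which remain correct even with preemption enabled. A hypothetical illustration of how a caller now chooses between the two variants; this function is not from the commit and exists only to show the convention:

/* Hypothetical example, not part of this commit: picking a variant
 * based on execution context.
 */
static void count_datagram(struct net *net, bool softirq_path)
{
	if (softirq_path)
		/* BH context: preemption is already disabled, so the
		 * cheaper __this_cpu_inc()-based variant is safe. */
		__UDP6_INC_STATS(net, UDP_MIB_INDATAGRAMS, 0 /* !udplite */);
	else
		/* Process context: this_cpu_inc() protects itself. */
		UDP6_INC_STATS(net, UDP_MIB_INDATAGRAMS, 0 /* !udplite */);
}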