/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Checksumming functions for IP, TCP, UDP and so on
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Borrows very liberally from tcp.c and ip.c, see those
 *		files for more names.
 */

#ifndef _CHECKSUM_H
#define _CHECKSUM_H

#include <linux/bitops.h>	/* ror32() */
#include <linux/errno.h>
#include <linux/string.h>	/* memcpy() */
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <asm/checksum.h>

#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
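/*
 * Copy from userspace and checksum the copied data in one logical
 * operation.  Seeding csum_partial() with ~0U means the result can never
 * be 0, so a return value of 0 unambiguously reports a faulting copy.
 */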
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
			       int len)
{
	if (copy_from_user(dst, src, len))
		return 0;
	return csum_partial(dst, len, ~0U);
}
#endif

#ifndef HAVE_CSUM_COPY_USER
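/*
 * Mirror image of csum_and_copy_from_user(): checksum the kernel buffer,
 * then copy it out to userspace.  As above, a return of 0 is reserved for
 * "the copy faulted".
 */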
static inline __wsum
csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
	__wsum sum = csum_partial(src, len, ~0U);

	if (copy_to_user(dst, src, len) == 0)
		return sum;
	return 0;
}
#endif

#ifndef _HAVE_ARCH_CSUM_AND_COPY
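/*
 * Generic fallback: memcpy() the data and return the checksum of the
 * copy.  Architectures with a fused copy-and-checksum primitive define
 * _HAVE_ARCH_CSUM_AND_COPY in their asm/checksum.h and provide their own
 * version instead.
 */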
static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	memcpy(dst, src, len);
	return csum_partial(dst, len, 0);
}
#endif

#ifndef HAVE_ARCH_CSUM_ADD
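/*
 * 32-bit ones'-complement addition: the (res < addend) term folds the
 * end-around carry back into the sum whenever the plain addition wraps.
 */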
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
	u32 res = (__force u32)csum;

	res += (__force u32)addend;
	return (__force __wsum)(res + (res < (__force u32)addend));
}
#endif
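
/* In ones'-complement arithmetic, subtraction is addition of the complement. */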
static inline __wsum csum_sub(__wsum csum, __wsum addend)
{
	return csum_add(csum, ~addend);
}
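
/*
 * 16-bit counterparts of csum_add()/csum_sub(), using the same
 * end-around-carry trick; useful for patching a folded __sum16 in place.
 */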
static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
{
	u16 res = (__force u16)csum;

	res += (__force u16)addend;
	return (__force __sum16)(res + (res < (__force u16)addend));
}

static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
{
	return csum16_add(csum, ~addend);
}
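
/*
 * Merge the checksum of a sub-block that starts @offset bytes into the
 * parent block.  The Internet checksum works on 16-bit words, so a sub-sum
 * taken from an odd byte offset has its bytes in the opposite lanes; the
 * 8-bit rotate below swaps the lanes before merging.  Illustrative use
 * (sketch, checksumming a buffer in two pieces):
 *
 *	__wsum sum = csum_partial(buf, 64, 0);
 *	sum = csum_block_add(sum, csum_partial(buf + 64, len - 64, 0), 64);
 */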
static inline __wsum
csum_block_add(__wsum csum, __wsum csum2, int offset)
{
	u32 sum = (__force u32)csum2;

	/* rotate sum to align it with a 16b boundary */
	if (offset & 1)
		sum = ror32(sum, 8);

	return csum_add(csum, (__force __wsum)sum);
}
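
/*
 * Combine callback used by skb_checksum(): the generic combiner has no use
 * for @len, but the parameter keeps the signature uniform with
 * skb_checksum_ops implementations (e.g. CRC32c) whose combine step needs
 * it.
 */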
static inline __wsum
csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
{
	return csum_block_add(csum, csum2, offset);
}
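
/* Remove a sub-block's contribution: complement it and merge as above. */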
static inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
	return csum_block_add(csum, ~csum2, offset);
}
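
/*
 * Widen a folded 16-bit checksum back into a 32-bit partial sum.  Plain
 * zero-extension is correct: added zero bytes contribute nothing to a
 * ones'-complement sum.
 */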
static inline __wsum csum_unfold(__sum16 n)
{
	return (__force __wsum)n;
}
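
/*
 * Update callback used by skb_checksum(); just csum_partial() with the
 * signature struct skb_checksum_ops expects.
 */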
static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
{
	return csum_partial(buff, len, sum);
}
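
/*
 * All-ones is the alternative ones'-complement representation of zero.
 * Protocols where an on-the-wire checksum of 0 means "no checksum" (e.g.
 * UDP) transmit this value instead when the computed checksum folds to
 * zero, e.g. (sketch):
 *
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */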
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)

/*
 * Apply a precomputed checksum diff to a folded checksum.  Added so that
 * bpf_l3_csum_replace() can patch e.g. the IPv4 header checksum from a
 * __wsum diff produced by bpf_csum_diff() or precalculated and fetched
 * from a map, analogous to what bpf_l4_csum_replace() already allowed for
 * L4 checksums.
 */
static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}
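
/*
 * RFC 1624-style incremental update of a folded checksum when a 4-byte
 * field changes: subtract the old value, add the new one, refold.  Sketch
 * of a NAT-style rewrite of an IPv4 destination address (assumes iph
 * points at a valid struct iphdr):
 *
 *	csum_replace4(&iph->check, iph->daddr, new_daddr);
 *	iph->daddr = new_daddr;
 */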
static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
	__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);

	*sum = csum_fold(csum_add(tmp, (__force __wsum)to));
}

/* Implements RFC 1624 (Incremental Internet Checksum); section 3
 * (Discussion) states:
 *
 *	HC' = ~(~HC + ~m + m')
 *
 *  HC  : old header checksum
 *  HC' : new header checksum
 *  m   : old value of a 16bit field
 *  m'  : new value of a 16bit field
 */
static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
	*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}
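
/*
 * skb-aware variants, defined in net/core/utils.c: besides rewriting the
 * checksum field itself, they keep the skb's checksum state coherent.
 * @pseudohdr says whether the rewritten field is covered by the transport
 * pseudo-header, which determines how CHECKSUM_COMPLETE and
 * CHECKSUM_PARTIAL packets have to be fixed up.
 */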
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
			      __be32 from, __be32 to, bool pseudohdr);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
			       const __be32 *from, const __be32 *to,
			       bool pseudohdr);
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
				     __wsum diff, bool pseudohdr);
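
/*
 * 16-bit wrapper: widening @from/@to to 32 bits is safe because the extra
 * zero bytes do not change a ones'-complement sum.
 */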
static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
					    __be16 from, __be16 to,
					    bool pseudohdr)
{
	inet_proto_csum_replace4(sum, skb, (__force __be32)from,
				 (__force __be32)to, pseudohdr);
}
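
/*
 * Remote checksum offload helper (used e.g. by VXLAN and GUE): @csum is
 * the checksum over the whole buffer at @ptr.  Drop the part covering the
 * bytes before @start, write the folded remainder into the packet at
 * @offset, and return the delta a caller needs to patch up a
 * CHECKSUM_COMPLETE value.
 */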
static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
				    int start, int offset)
{
	__sum16 *psum = (__sum16 *)(ptr + offset);
	__wsum delta;

	/* Subtract out checksum up to start */
	csum = csum_sub(csum, csum_partial(ptr, start, 0));

	/* Set derived checksum in packet */
	delta = csum_sub((__force __wsum)csum_fold(csum),
			 (__force __wsum)*psum);
	*psum = csum_fold(csum);

	return delta;
}
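
/*
 * Reverse a previous remcsum_adjust() given the delta it returned,
 * restoring the original checksum field (used when GRO must leave the
 * packet unmodified).
 */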
static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
{
	*psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
}

#endif