mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-21 19:53:59 +08:00
64e69073c3
Arches can have more efficient implementations of these routines. Acked-by: Arnd Bergmann <arnd@arndb.de> Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
88 lines
2.3 KiB
C
#ifndef __ASM_GENERIC_CHECKSUM_H
|
|
#define __ASM_GENERIC_CHECKSUM_H
|
|
|
|
/*
|
|
* computes the checksum of a memory block at buff, length len,
|
|
* and adds in "sum" (32-bit)
|
|
*
|
|
* returns a 32-bit number suitable for feeding into itself
|
|
* or csum_tcpudp_magic
|
|
*
|
|
* this function must be called with even lengths, except
|
|
* for the last fragment, which may be odd
|
|
*
|
|
* it's best to have buff aligned on a 32-bit boundary
|
|
*/
|
|
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
|
|
|
|
/*
|
|
* the same as csum_partial, but copies from src while it
|
|
* checksums
|
|
*
|
|
* here even more important to align src and dst on a 32-bit (or even
|
|
* better 64-bit) boundary
|
|
*/
|
|
extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
|
|
|
|
/*
|
|
* the same as csum_partial_copy, but copies from user space.
|
|
*
|
|
* here even more important to align src and dst on a 32-bit (or even
|
|
* better 64-bit) boundary
|
|
*/
|
|
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
|
|
int len, __wsum sum, int *csum_err);
|
|
|
|
#ifndef csum_partial_copy_nocheck
/*
 * Generic fallback: with no access checking required, the "nocheck"
 * variant is simply csum_partial_copy.
 */
#define csum_partial_copy_nocheck(src, dst, len, sum)	\
	csum_partial_copy((src), (dst), (len), (sum))
#endif
#ifndef ip_fast_csum
|
|
/*
|
|
* This is a version of ip_compute_csum() optimized for IP headers,
|
|
* which always checksum on 4 octet boundaries.
|
|
*/
|
|
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
|
|
#endif
|
|
|
|
#ifndef csum_fold
|
|
/*
|
|
* Fold a partial checksum
|
|
*/
|
|
static inline __sum16 csum_fold(__wsum csum)
|
|
{
|
|
u32 sum = (__force u32)csum;
|
|
sum = (sum & 0xffff) + (sum >> 16);
|
|
sum = (sum & 0xffff) + (sum >> 16);
|
|
return (__force __sum16)~sum;
|
|
}
|
|
#endif
|
|
|
|
#ifndef csum_tcpudp_nofold
|
|
/*
|
|
* computes the checksum of the TCP/UDP pseudo-header
|
|
* returns a 16-bit checksum, already complemented
|
|
*/
|
|
extern __wsum
|
|
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
|
|
unsigned short proto, __wsum sum);
|
|
#endif
|
|
|
|
#ifndef csum_tcpudp_magic
|
|
static inline __sum16
|
|
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
|
|
unsigned short proto, __wsum sum)
|
|
{
|
|
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
|
|
}
|
|
#endif
|
|
|
|
/*
|
|
* this routine is used for miscellaneous IP-like checksums, mainly
|
|
* in icmp.c
|
|
*/
|
|
extern __sum16 ip_compute_csum(const void *buff, int len);
|
|
|
|
#endif /* __ASM_GENERIC_CHECKSUM_H */
|