mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2025-01-10 07:44:23 +08:00
powerpc: Use rol32() instead of opencoding in csum_fold()
rol32(x, 16) will do the rotate using rlwinm. No need to open code using inline assembly. Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/794337eff7bb803d2c4e67d9eee635390c4c48fe.1646812553.git.christophe.leroy@csgroup.eu
This commit is contained in:
parent
e6f6390ab7
commit
a1ae431705
@@ -38,14 +38,15 @@ extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
|
||||
*/
|
||||
/*
 * csum_fold - fold a 32-bit partial checksum down to a 16-bit checksum.
 * @sum: 32-bit one's-complement partial sum (network byte order).
 *
 * Returns the folded 16-bit one's-complement checksum.
 *
 * Adding @sum to its halves-swapped copy makes any carry out of the low
 * 16 bits propagate into the high 16 bits, so after complementing, the
 * high half holds the correct folded checksum.
 *
 * rol32(x, 16) compiles to a single rlwinm on powerpc, so there is no
 * need for the open-coded inline assembly this replaced.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	/* Work on a plain u32; __wsum is a bitwise type. */
	u32 tmp = (__force u32)sum;

	/*
	 * swap the two 16-bit halves of sum
	 * if there is a carry from adding the two 16-bit halves,
	 * it will carry from the lower half into the upper half,
	 * giving us the correct sum in the upper half.
	 */
	return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16);
}
|
||||
|
||||
static inline u32 from64to32(u64 x)
|
||||
|
Loading…
Reference in New Issue
Block a user