Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-11 12:28:41 +08:00)
asm-generic: simplify asm/unaligned.h
The get_unaligned()/put_unaligned() implementations are much more complex than necessary, now that all architectures use the same code.

Move everything into one file and use a much more compact way to express the same logic. I've compared the binary output using gcc-11 across defconfig builds for all architectures and found this patch to make no difference, except for a single function on powerpc that needs two additional register moves because of random differences in register allocation.

There are a handful of callers of the low-level __get_unaligned_cpu32, so leave that in place for the time being even though the common code no longer uses it.

This adds a warning for any caller of get_unaligned()/put_unaligned() that passes in a single-byte pointer, but I've sent patches for all instances that show up in x86 and randconfig builds. It would be nice to change the arguments of the endian-specific accessors to take the matching __be16/__be32/__be64/__le16/__le32/__le64 arguments instead of a void pointer, but that requires more changes to the rest of the kernel.

This new version does allow aggregate types into get_unaligned(), which was not the original goal but might come in handy.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
This commit is contained in: parent d40d817948, commit 803f4e1eab
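The heart of the new implementation is the packed-struct trick used by __get_unaligned_t()/__put_unaligned_t() in the diff below. As a rough standalone illustration (plain C with GCC/Clang extensions, not part of the patch; the demo_get_unaligned() name and the local __packed define are invented for this sketch): wrapping the target type in a single-member packed struct tells the compiler that the access may be misaligned, so it emits code that is safe on any architecture.

#include <inttypes.h>
#include <stdio.h>

#define __packed __attribute__((packed))

/* Hypothetical stand-in for the kernel's __get_unaligned_t(). */
#define demo_get_unaligned(type, ptr) ({ \
        const struct { type x; } __packed *__p = (const void *)(ptr); \
        __p->x; \
})

int main(void)
{
        unsigned char buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

        /* Load 32 bits from a deliberately misaligned (odd) offset. */
        uint32_t v = demo_get_unaligned(uint32_t, buf + 1);

        printf("0x%08" PRIx32 "\n", v); /* printed value depends on host byte order */
        return 0;
}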
include/asm-generic/unaligned.h:
@@ -6,20 +6,124 @@
 * This is the most generic implementation of unaligned accesses
 * and should work almost anywhere.
 */
#include <linux/unaligned/packed_struct.h>
#include <asm/byteorder.h>

#if defined(__LITTLE_ENDIAN)
# include <linux/unaligned/le_struct.h>
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
#elif defined(__BIG_ENDIAN)
# include <linux/unaligned/be_struct.h>
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
#else
# error need to define endianess
#endif
#define __get_unaligned_t(type, ptr) ({ \
        const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
        __pptr->x; \
})

#define __put_unaligned_t(type, val, ptr) do { \
        struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
        __pptr->x = (val); \
} while (0)

#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))

static inline u16 get_unaligned_le16(const void *p)
{
        return le16_to_cpu(__get_unaligned_t(__le16, p));
}

static inline u32 get_unaligned_le32(const void *p)
{
        return le32_to_cpu(__get_unaligned_t(__le32, p));
}

static inline u64 get_unaligned_le64(const void *p)
{
        return le64_to_cpu(__get_unaligned_t(__le64, p));
}

static inline void put_unaligned_le16(u16 val, void *p)
{
        __put_unaligned_t(__le16, cpu_to_le16(val), p);
}

static inline void put_unaligned_le32(u32 val, void *p)
{
        __put_unaligned_t(__le32, cpu_to_le32(val), p);
}

static inline void put_unaligned_le64(u64 val, void *p)
{
        __put_unaligned_t(__le64, cpu_to_le64(val), p);
}

static inline u16 get_unaligned_be16(const void *p)
{
        return be16_to_cpu(__get_unaligned_t(__be16, p));
}

static inline u32 get_unaligned_be32(const void *p)
{
        return be32_to_cpu(__get_unaligned_t(__be32, p));
}

static inline u64 get_unaligned_be64(const void *p)
{
        return be64_to_cpu(__get_unaligned_t(__be64, p));
}

static inline void put_unaligned_be16(u16 val, void *p)
{
        __put_unaligned_t(__be16, cpu_to_be16(val), p);
}

static inline void put_unaligned_be32(u32 val, void *p)
{
        __put_unaligned_t(__be32, cpu_to_be32(val), p);
}

static inline void put_unaligned_be64(u64 val, void *p)
{
        __put_unaligned_t(__be64, cpu_to_be64(val), p);
}

static inline u32 __get_unaligned_be24(const u8 *p)
{
        return p[0] << 16 | p[1] << 8 | p[2];
}

static inline u32 get_unaligned_be24(const void *p)
{
        return __get_unaligned_be24(p);
}

static inline u32 __get_unaligned_le24(const u8 *p)
{
        return p[0] | p[1] << 8 | p[2] << 16;
}

static inline u32 get_unaligned_le24(const void *p)
{
        return __get_unaligned_le24(p);
}

static inline void __put_unaligned_be24(const u32 val, u8 *p)
{
        *p++ = val >> 16;
        *p++ = val >> 8;
        *p++ = val;
}

static inline void put_unaligned_be24(const u32 val, void *p)
{
        __put_unaligned_be24(val, p);
}

static inline void __put_unaligned_le24(const u32 val, u8 *p)
{
        *p++ = val;
        *p++ = val >> 8;
        *p++ = val >> 16;
}

static inline void put_unaligned_le24(const u32 val, void *p)
{
        __put_unaligned_le24(val, p);
}

#endif /* __ASM_GENERIC_UNALIGNED_H */
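As a hedged usage sketch (not taken from the patch), this is how code elsewhere in the kernel might use the helpers defined above to read and write a misaligned little-endian on-wire header; the demo_* names and the struct layout are invented for illustration, and u8/u16/u32 are the usual <linux/types.h> typedefs.

struct demo_wire_hdr {
        u16 type;       /* buffer bytes 0-1, little-endian */
        u32 len;        /* buffer bytes 2-5, typically misaligned */
};

static void demo_parse_hdr(const void *buf, struct demo_wire_hdr *hdr)
{
        const u8 *p = buf;

        hdr->type = get_unaligned_le16(p);
        hdr->len  = get_unaligned_le32(p + 2);
}

static void demo_write_hdr(void *buf, const struct demo_wire_hdr *hdr)
{
        u8 *p = buf;

        put_unaligned_le16(hdr->type, p);
        put_unaligned_le32(hdr->len, p + 2);
}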
include/linux/unaligned/be_struct.h (deleted):
@@ -1,67 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UNALIGNED_BE_STRUCT_H
#define _LINUX_UNALIGNED_BE_STRUCT_H

#include <linux/unaligned/packed_struct.h>

static inline u16 get_unaligned_be16(const void *p)
{
        return __get_unaligned_cpu16((const u8 *)p);
}

static inline u32 get_unaligned_be32(const void *p)
{
        return __get_unaligned_cpu32((const u8 *)p);
}

static inline u64 get_unaligned_be64(const void *p)
{
        return __get_unaligned_cpu64((const u8 *)p);
}

static inline void put_unaligned_be16(u16 val, void *p)
{
        __put_unaligned_cpu16(val, p);
}

static inline void put_unaligned_be32(u32 val, void *p)
{
        __put_unaligned_cpu32(val, p);
}

static inline void put_unaligned_be64(u64 val, void *p)
{
        __put_unaligned_cpu64(val, p);
}

static inline u16 get_unaligned_le16(const void *p)
{
        return swab16(__get_unaligned_cpu16((const u8 *)p));
}

static inline u32 get_unaligned_le32(const void *p)
{
        return swab32(__get_unaligned_cpu32((const u8 *)p));
}

static inline u64 get_unaligned_le64(const void *p)
{
        return swab64(__get_unaligned_cpu64((const u8 *)p));
}

static inline void put_unaligned_le16(u16 val, void *p)
{
        __put_unaligned_cpu16(swab16(val), p);
}

static inline void put_unaligned_le32(u32 val, void *p)
{
        __put_unaligned_cpu32(swab32(val), p);
}

static inline void put_unaligned_le64(u64 val, void *p)
{
        __put_unaligned_cpu64(swab64(val), p);
}

#endif /* _LINUX_UNALIGNED_BE_STRUCT_H */
include/linux/unaligned/generic.h (deleted):
@@ -1,115 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UNALIGNED_GENERIC_H
#define _LINUX_UNALIGNED_GENERIC_H

#include <linux/types.h>

/*
 * Cause a link-time error if we try an unaligned access other than
 * 1,2,4 or 8 bytes long
 */
extern void __bad_unaligned_access_size(void);

#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \
        __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
        __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \
        __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \
        __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \
        __bad_unaligned_access_size())))); \
        }))

#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \
        __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
        __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \
        __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \
        __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \
        __bad_unaligned_access_size())))); \
        }))

#define __put_unaligned_le(val, ptr) ({ \
        void *__gu_p = (ptr); \
        switch (sizeof(*(ptr))) { \
        case 1: \
                *(u8 *)__gu_p = (__force u8)(val); \
                break; \
        case 2: \
                put_unaligned_le16((__force u16)(val), __gu_p); \
                break; \
        case 4: \
                put_unaligned_le32((__force u32)(val), __gu_p); \
                break; \
        case 8: \
                put_unaligned_le64((__force u64)(val), __gu_p); \
                break; \
        default: \
                __bad_unaligned_access_size(); \
                break; \
        } \
        (void)0; })

#define __put_unaligned_be(val, ptr) ({ \
        void *__gu_p = (ptr); \
        switch (sizeof(*(ptr))) { \
        case 1: \
                *(u8 *)__gu_p = (__force u8)(val); \
                break; \
        case 2: \
                put_unaligned_be16((__force u16)(val), __gu_p); \
                break; \
        case 4: \
                put_unaligned_be32((__force u32)(val), __gu_p); \
                break; \
        case 8: \
                put_unaligned_be64((__force u64)(val), __gu_p); \
                break; \
        default: \
                __bad_unaligned_access_size(); \
                break; \
        } \
        (void)0; })

static inline u32 __get_unaligned_be24(const u8 *p)
{
        return p[0] << 16 | p[1] << 8 | p[2];
}

static inline u32 get_unaligned_be24(const void *p)
{
        return __get_unaligned_be24(p);
}

static inline u32 __get_unaligned_le24(const u8 *p)
{
        return p[0] | p[1] << 8 | p[2] << 16;
}

static inline u32 get_unaligned_le24(const void *p)
{
        return __get_unaligned_le24(p);
}

static inline void __put_unaligned_be24(const u32 val, u8 *p)
{
        *p++ = val >> 16;
        *p++ = val >> 8;
        *p++ = val;
}

static inline void put_unaligned_be24(const u32 val, void *p)
{
        __put_unaligned_be24(val, p);
}

static inline void __put_unaligned_le24(const u32 val, u8 *p)
{
        *p++ = val;
        *p++ = val >> 8;
        *p++ = val >> 16;
}

static inline void put_unaligned_le24(const u32 val, void *p)
{
        __put_unaligned_le24(val, p);
}

#endif /* _LINUX_UNALIGNED_GENERIC_H */
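For comparison with the new typeof()-based macros, here is a minimal standalone sketch (plain C with GCC/Clang builtins, not from the kernel; all demo_* names are invented) of the compile-time size dispatch that the removed __get_unaligned_le()/__get_unaligned_be() macros above were built on: __builtin_choose_expr() picks one branch based on sizeof() at compile time, so only the matching accessor call is emitted.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

static uint16_t demo_load16(const void *p) { uint16_t v; memcpy(&v, p, sizeof(v)); return v; }
static uint32_t demo_load32(const void *p) { uint32_t v; memcpy(&v, p, sizeof(v)); return v; }

/* Choose the accessor from the pointee's size, at compile time. */
#define demo_load(ptr) \
        __builtin_choose_expr(sizeof(*(ptr)) == 2, demo_load16(ptr), \
        __builtin_choose_expr(sizeof(*(ptr)) == 4, demo_load32(ptr), \
                              (void)0))

int main(void)
{
        uint8_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        const uint32_t *p32 = (const uint32_t *)(buf + 1);      /* misaligned on purpose */

        /* sizeof(*p32) == 4, so this expands to demo_load32(p32). */
        printf("0x%08" PRIx32 "\n", demo_load(p32));
        return 0;
}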
include/linux/unaligned/le_struct.h (deleted):
@@ -1,67 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UNALIGNED_LE_STRUCT_H
#define _LINUX_UNALIGNED_LE_STRUCT_H

#include <linux/unaligned/packed_struct.h>

static inline u16 get_unaligned_le16(const void *p)
{
        return __get_unaligned_cpu16((const u8 *)p);
}

static inline u32 get_unaligned_le32(const void *p)
{
        return __get_unaligned_cpu32((const u8 *)p);
}

static inline u64 get_unaligned_le64(const void *p)
{
        return __get_unaligned_cpu64((const u8 *)p);
}

static inline void put_unaligned_le16(u16 val, void *p)
{
        __put_unaligned_cpu16(val, p);
}

static inline void put_unaligned_le32(u32 val, void *p)
{
        __put_unaligned_cpu32(val, p);
}

static inline void put_unaligned_le64(u64 val, void *p)
{
        __put_unaligned_cpu64(val, p);
}

static inline u16 get_unaligned_be16(const void *p)
{
        return swab16(__get_unaligned_cpu16((const u8 *)p));
}

static inline u32 get_unaligned_be32(const void *p)
{
        return swab32(__get_unaligned_cpu32((const u8 *)p));
}

static inline u64 get_unaligned_be64(const void *p)
{
        return swab64(__get_unaligned_cpu64((const u8 *)p));
}

static inline void put_unaligned_be16(u16 val, void *p)
{
        __put_unaligned_cpu16(swab16(val), p);
}

static inline void put_unaligned_be32(u32 val, void *p)
{
        __put_unaligned_cpu32(swab32(val), p);
}

static inline void put_unaligned_be64(u64 val, void *p)
{
        __put_unaligned_cpu64(swab64(val), p);
}

#endif /* _LINUX_UNALIGNED_LE_STRUCT_H */