kernel: add common infrastructure for unaligned access
Create a linux/unaligned directory similar in spirit to the linux/byteorder
folder to hold generic implementations collected from various arches.
Currently there are five implementations:
1) packed_struct.h: C-struct based, from asm-generic/unaligned.h
2) le_byteshift.h: Open coded byte-swapping, heavily based on asm-arm
3) be_byteshift.h: Open coded byte-swapping, heavily based on asm-arm
4) memmove.h: taken from multiple implementations in tree
5) access_ok.h: taken from x86 and others, unaligned access is ok.
All of the new implementations check for sizes other than 1, 2, 4 or 8
and will fail to link in that case.
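For illustration, the usual way such a size check becomes a link-time
failure (rather than a run-time one) is to route unsupported sizes to an
extern function that is never defined anywhere; the helper and macro names
below are hypothetical, not the kernel's actual generic.h:

/*
 * Illustrative sketch only.  With a constant sizeof() the compiler keeps
 * just the matching case, so the undefined extern is only referenced --
 * and the link only fails -- when an unsupported size is used.
 */
extern void __bad_unaligned_access_size_sketch(void);	/* never defined */

#define put_unaligned_le_sketch(val, ptr) do {				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		*(u8 *)(ptr) = (u8)(val);				\
		break;							\
	case 2:								\
		put_unaligned_le16((u16)(val), (ptr));			\
		break;							\
	case 4:								\
		put_unaligned_le32((u32)(val), (ptr));			\
		break;							\
	case 8:								\
		put_unaligned_le64((u64)(val), (ptr));			\
		break;							\
	default:							\
		__bad_unaligned_access_size_sketch();			\
		break;							\
	}								\
} while (0)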
API additions:
get_unaligned_{le16|le32|le64|be16|be32|be64}(p) which is meant to replace
code of the form:
le16_to_cpu(get_unaligned((__le16 *)p));
put_unaligned_{le16|le32|le64|be16|be32|be64}(val, pointer) which is meant to
replace code of the form:
put_unaligned(cpu_to_le16(val), (__le16 *)p);
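As a caller-side illustration (the packet layout and byte offset are
invented for this example, not taken from any in-tree user):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Read/write a 32-bit little-endian field at byte offset 2 of a buffer,
 * which is not necessarily aligned. */
static u32 read_seq(const u8 *pkt)
{
	/* old style: return le32_to_cpu(get_unaligned((__le32 *)(pkt + 2))); */
	return get_unaligned_le32(pkt + 2);
}

static void write_seq(u8 *pkt, u32 seq)
{
	/* old style: put_unaligned(cpu_to_le32(seq), (__le32 *)(pkt + 2)); */
	put_unaligned_le32(seq, pkt + 2);
}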
The headers that arches should include from their asm/unaligned.h:
access_ok.h: wrappers of the byte-swapping functions in asm/byteorder
Choose a particular implementation for little-endian access:
le_byteshift.h
le_memmove.h (arch must be LE)
le_struct.h (arch must be LE)
Choose a particular implementation for big-endian access:
be_byteshift.h
be_memmove.h (arch must be BE)
be_struct.h (arch must be BE)
After including as needed from the above, include unaligned/generic.h and
define your arch's get/put_unaligned in terms of the generic helpers (for
the LE case), as in the sketch below:
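(Illustrative only: the guard name is invented, and the le_struct.h plus
be_byteshift.h pairing assumes a little-endian arch without hardware
support for unaligned access.)

#ifndef _ASM_EXAMPLE_UNALIGNED_H
#define _ASM_EXAMPLE_UNALIGNED_H

/* Native (LE) accesses go through the packed-struct helpers; foreign
 * (BE) accesses are open-coded byte shifts. */
#include <linux/unaligned/le_struct.h>
#include <linux/unaligned/be_byteshift.h>
#include <linux/unaligned/generic.h>

#define get_unaligned	__get_unaligned_le
#define put_unaligned	__put_unaligned_le

#endif /* _ASM_EXAMPLE_UNALIGNED_H */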
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-29 16:03:27 +08:00
#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H
#define _LINUX_UNALIGNED_PACKED_STRUCT_H

#include <linux/kernel.h>
include/linux/unaligned: pack the whole struct rather than just the field
The current packed struct implementation of unaligned access adds the
packed attribute only to the field within the unaligned struct rather than
to the struct as a whole. This is not sufficient to enforce proper
behaviour on architectures with a default struct alignment of more than
one byte.
For example, the current implementation of __get_unaligned_cpu16, when
compiled for arm with gcc -O1 -mstructure-size-boundary=32, assumes the
struct sits on a 4-byte boundary and therefore loads the 16-bit packed
field as if it were aligned:
__get_unaligned_cpu16:
ldrh r0, [r0, #0]
bx lr
Moving the packed attribute to the struct rather than the field causes the
proper unaligned access code to be generated:
__get_unaligned_cpu16:
ldrb r3, [r0, #0] @ zero_extendqisi2
ldrb r0, [r0, #1] @ zero_extendqisi2
orr r0, r3, r0, asl #8
bx lr
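Concretely, the two forms look like this (the struct names are suffixed
here only so both can be shown side by side):

/* Field-only packing (the problematic form): only the member is marked
 * packed, so the struct type can still carry the arch's default alignment
 * and the compiler may assume an aligned base address. */
struct __una_u16_field_packed { u16 x __attribute__((packed)); };

/* Whole-struct packing (the fix): the compiler may not assume any
 * alignment for the object, so it emits genuinely unaligned-safe code. */
struct __una_u16_struct_packed { u16 x; } __attribute__((packed));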
Signed-off-by: Will Newton <will.newton@gmail.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2010-12-22 09:24:29 +08:00
struct __una_u16 { u16 x; } __attribute__((packed));
struct __una_u32 { u32 x; } __attribute__((packed));
struct __una_u64 { u64 x; } __attribute__((packed));
/*
 * Loads in native (CPU) endianness.  Going through a fully packed struct
 * tells the compiler it may not assume any alignment for *p, so it emits
 * whatever access sequence is safe on this architecture.
 */
static inline u16 __get_unaligned_cpu16(const void *p)
{
	const struct __una_u16 *ptr = (const struct __una_u16 *)p;
	return ptr->x;
}

static inline u32 __get_unaligned_cpu32(const void *p)
{
	const struct __una_u32 *ptr = (const struct __una_u32 *)p;
	return ptr->x;
}

static inline u64 __get_unaligned_cpu64(const void *p)
{
	const struct __una_u64 *ptr = (const struct __una_u64 *)p;
	return ptr->x;
}

/*
 * Stores in native (CPU) endianness, likewise safe for any alignment of p.
 */
static inline void __put_unaligned_cpu16(u16 val, void *p)
{
	struct __una_u16 *ptr = (struct __una_u16 *)p;
	ptr->x = val;
}

static inline void __put_unaligned_cpu32(u32 val, void *p)
{
	struct __una_u32 *ptr = (struct __una_u32 *)p;
	ptr->x = val;
}

static inline void __put_unaligned_cpu64(u64 val, void *p)
{
	struct __una_u64 *ptr = (struct __una_u64 *)p;
	ptr->x = val;
}
#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */
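A brief usage illustration of the helpers above (the buffer offsets are
invented; in-tree code would normally reach these through the higher-level
get_unaligned()/put_unaligned() or get/put_unaligned_{le,be}* API rather
than calling the __*_cpu* forms directly):

/* Copy a native-endian u32 between two byte streams at unaligned offsets. */
static void copy_field(u8 *dst, const u8 *src)
{
	u32 v = __get_unaligned_cpu32(src + 1);	/* src + 1 need not be aligned */

	__put_unaligned_cpu32(v, dst + 3);	/* neither does dst + 3 */
}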