mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-03 17:14:14 +08:00
ring-buffer: Remove HAVE_64BIT_ALIGNED_ACCESS
Commit c19fa94a8f
("Add HAVE_64BIT_ALIGNED_ACCESS") added the config for
architectures that required 64bit aligned access for all 64bit words. As
the ftrace ring buffer stores data on 4 byte alignment, this config option
was used to force it to store data on 8 byte alignment to make sure the data
being stored and written directly into the ring buffer was 8 byte aligned as
it would cause issues trying to write an 8 byte word on a memory location
that is 4 byte but not 8 byte aligned.
But with the removal of the metag architecture, which was the only
architecture to use this, there is no architecture supported by Linux that
requires 8 byte aligned access for all 8 byte words (4 byte alignment is good
enough). Removing this config can simplify the code a bit.
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
This commit is contained in:
parent
a124692b69
commit
86b3de60a0
16
arch/Kconfig
16
arch/Kconfig
@ -128,22 +128,6 @@ config UPROBES
|
||||
managed by the kernel and kept transparent to the probed
|
||||
application. )
|
||||
|
||||
config HAVE_64BIT_ALIGNED_ACCESS
|
||||
def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
|
||||
help
|
||||
Some architectures require 64 bit accesses to be 64 bit
|
||||
aligned, which also requires structs containing 64 bit values
|
||||
to be 64 bit aligned too. This includes some 32 bit
|
||||
architectures which can do 64 bit accesses, as well as 64 bit
|
||||
architectures without unaligned access.
|
||||
|
||||
This symbol should be selected by an architecture if 64 bit
|
||||
accesses are required to be 64 bit aligned in this way even
|
||||
though it is not a 64 bit architecture.
|
||||
|
||||
See Documentation/unaligned-memory-access.txt for more
|
||||
information on the topic of unaligned memory accesses.
|
||||
|
||||
config HAVE_EFFICIENT_UNALIGNED_ACCESS
|
||||
bool
|
||||
help
|
||||
|
@ -128,16 +128,7 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
|
||||
#define RB_ALIGNMENT 4U
|
||||
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
|
||||
#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
|
||||
|
||||
#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
|
||||
# define RB_FORCE_8BYTE_ALIGNMENT 0
|
||||
# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
|
||||
#else
|
||||
# define RB_FORCE_8BYTE_ALIGNMENT 1
|
||||
# define RB_ARCH_ALIGNMENT 8U
|
||||
#endif
|
||||
|
||||
#define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT)
|
||||
#define RB_ALIGN_DATA __aligned(RB_ALIGNMENT)
|
||||
|
||||
/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
|
||||
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
|
||||
@ -2373,7 +2364,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
|
||||
|
||||
event->time_delta = delta;
|
||||
length -= RB_EVNT_HDR_SIZE;
|
||||
if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
|
||||
if (length > RB_MAX_SMALL_DATA) {
|
||||
event->type_len = 0;
|
||||
event->array[0] = length;
|
||||
} else
|
||||
@ -2388,11 +2379,11 @@ static unsigned rb_calculate_event_length(unsigned length)
|
||||
if (!length)
|
||||
length++;
|
||||
|
||||
if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
|
||||
if (length > RB_MAX_SMALL_DATA)
|
||||
length += sizeof(event.array[0]);
|
||||
|
||||
length += RB_EVNT_HDR_SIZE;
|
||||
length = ALIGN(length, RB_ARCH_ALIGNMENT);
|
||||
length = ALIGN(length, RB_ALIGNMENT);
|
||||
|
||||
/*
|
||||
* In case the time delta is larger than the 27 bits for it
|
||||
|
Loading…
Reference in New Issue
Block a user