ring_buffer: allocate buffer page pointer
The current method of overlaying the page frame as the buffer page pointer
can be very dangerous and limits our ability to do other things with a page
from the buffer, like send it off to disk.

This patch allocates the buffer_page instead of overlaying the page's page
frame. The use of the buffer_page has hardly changed due to this.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 7104f300c5
commit e4c2ce82ca
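The patch's allocation pattern repeats in three hunks below (rb_allocate_pages,
rb_allocate_cpu_buffer and ring_buffer_resize): a cache-line-aligned, NUMA-local
kzalloc_node() for the buffer_page descriptor, then a separate __get_free_page()
for the data page. Here is a minimal sketch of that pattern pulled out as a
standalone helper; the helper name is invented for illustration and does not
appear in the patch:

	/*
	 * Sketch only: the descriptor and its data page are now two
	 * distinct allocations, so the data page is an ordinary page
	 * the rest of the kernel can take over (e.g. send it to disk).
	 */
	static struct buffer_page *rb_alloc_bpage_sketch(int cpu)
	{
		struct buffer_page *page;
		unsigned long addr;

		/* Descriptor: zeroed, cache-line aligned, on cpu's node. */
		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
				    GFP_KERNEL, cpu_to_node(cpu));
		if (!page)
			return NULL;

		/* Data page: allocated separately from the descriptor. */
		addr = __get_free_page(GFP_KERNEL);
		if (!addr) {
			kfree(page);
			return NULL;
		}
		page->page = (void *)addr;

		return page;
	}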
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -115,16 +115,10 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
  * Thanks to Peter Zijlstra for suggesting this idea.
  */
 struct buffer_page {
-	union {
-		struct {
-			unsigned long	 flags;		/* mandatory */
-			atomic_t	 _count;	/* mandatory */
-			u64		 time_stamp;	/* page time stamp */
-			unsigned	 size;		/* size of page data */
-			struct list_head list;		/* list of free pages */
-		};
-		struct page page;
-	};
+	u64		 time_stamp;	/* page time stamp */
+	unsigned	 size;		/* size of page data */
+	struct list_head list;		/* list of free pages */
+	void		*page;		/* Actual data page */
 };
 
 /*
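To see why the removed union was dangerous: the anonymous struct members
physically share storage with struct page, so every tracer bookkeeping write
lands in state the memory-management layer owns (which is also why the old
free path below has to repair the mapcount and mapping by hand). A user-space
sketch of the aliasing, with stand-in types; every name here is invented for
illustration:

	#include <assert.h>

	/* Stand-in for the kernel's struct page (illustration only). */
	struct fake_page {
		unsigned long flags;
		int _count;
	};

	/* Shape of the old, overlaid buffer_page. */
	struct overlaid_buffer_page {
		union {
			struct {
				unsigned long flags;	/* aliases page.flags  */
				int	      _count;	/* aliases page._count */
			};
			struct fake_page page;
		};
	};

	int main(void)
	{
		struct overlaid_buffer_page bp =
			{ .page = { .flags = 0x1, ._count = 2 } };

		bp.flags = 0xdead;			/* the tracer's write ...   */
		assert(bp.page.flags == 0xdead);	/* ... clobbers "mm" state  */
		return 0;
	}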
@@ -133,9 +127,9 @@ struct buffer_page {
  */
 static inline void free_buffer_page(struct buffer_page *bpage)
 {
-	reset_page_mapcount(&bpage->page);
-	bpage->page.mapping = NULL;
-	__free_page(&bpage->page);
+	if (bpage->page)
+		__free_page(bpage->page);
+	kfree(bpage);
 }
 
 /*
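The new free path pairs kfree() of the descriptor with releasing the data
page. One API note: __get_free_page() returns a virtual address, and its usual
counterpart is free_page() on that address rather than __free_page() on a
struct page pointer. A sketch of a teardown matching the hypothetical
allocator above, using that counterpart:

	/*
	 * Sketch only: matching teardown for rb_alloc_bpage_sketch().
	 * free_page() takes the virtual address __get_free_page()
	 * returned; the descriptor itself came from kzalloc_node().
	 */
	static void rb_free_bpage_sketch(struct buffer_page *page)
	{
		if (page->page)
			free_page((unsigned long)page->page);	/* data page  */
		kfree(page);					/* descriptor */
	}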
@@ -237,11 +231,16 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	unsigned i;
 
 	for (i = 0; i < nr_pages; i++) {
+		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+				    GFP_KERNEL, cpu_to_node(cpu));
+		if (!page)
+			goto free_pages;
+		list_add(&page->list, &pages);
+
 		addr = __get_free_page(GFP_KERNEL);
 		if (!addr)
 			goto free_pages;
-		page = (struct buffer_page *)virt_to_page(addr);
-		list_add(&page->list, &pages);
+		page->page = (void *)addr;
 	}
 
 	list_splice(&pages, head);
@@ -262,6 +261,7 @@ static struct ring_buffer_per_cpu *
 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	struct buffer_page *page;
 	unsigned long addr;
 	int ret;
 
@@ -275,10 +275,17 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	spin_lock_init(&cpu_buffer->lock);
 	INIT_LIST_HEAD(&cpu_buffer->pages);
 
+	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
+			    GFP_KERNEL, cpu_to_node(cpu));
+	if (!page)
+		goto fail_free_buffer;
+
+	cpu_buffer->reader_page = page;
 	addr = __get_free_page(GFP_KERNEL);
 	if (!addr)
-		goto fail_free_buffer;
-	cpu_buffer->reader_page = (struct buffer_page *)virt_to_page(addr);
+		goto fail_free_reader;
+	page->page = (void *)addr;
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
 	cpu_buffer->reader_page->size = 0;
 
@@ -523,11 +530,16 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 
 	for_each_buffer_cpu(buffer, cpu) {
 		for (i = 0; i < new_pages; i++) {
+			page = kzalloc_node(ALIGN(sizeof(*page),
+						  cache_line_size()),
+					    GFP_KERNEL, cpu_to_node(cpu));
+			if (!page)
+				goto free_pages;
+			list_add(&page->list, &pages);
 			addr = __get_free_page(GFP_KERNEL);
 			if (!addr)
 				goto free_pages;
-			page = (struct buffer_page *)virt_to_page(addr);
-			list_add(&page->list, &pages);
+			page->page = (void *)addr;
 		}
 	}
 
@@ -567,9 +579,7 @@ static inline int rb_null_event(struct ring_buffer_event *event)
 
 static inline void *rb_page_index(struct buffer_page *page, unsigned index)
 {
-	void *addr = page_address(&page->page);
-
-	return addr + index;
+	return page->page + index;
 }
 
 static inline struct ring_buffer_event *