relay: Use per CPU constructs for the relay channel buffer pointers

relay essentially needs to maintain a per-CPU array of channel buffer
pointers, but it creates that array manually.  It is better to use the
per-CPU constructs provided by the kernel to allocate and access the
array of pointers to channel buffers.

Signed-off-by: Akash Goel <akash.goel@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://lkml.kernel.org/r/1470909140-25919-1-git-send-email-akash.goel@intel.com
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Author:    Akash Goel
Committer: Thomas Gleixner
Date:      2016-09-02 21:47:38 +02:00
Commit:    017c59c042 (parent ee1e714b94)
2 changed files with 52 additions and 39 deletions
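
As background, a minimal sketch of the per-CPU pointer pattern this patch adopts, assuming hypothetical demo_chan/demo_buf types in place of rchan/rchan_buf (alloc_percpu(), per_cpu_ptr() and this_cpu_ptr() are the standard kernel per-CPU APIs that appear in the diff below):

/*
 * Sketch only: demo_chan/demo_buf are hypothetical stand-ins for
 * rchan/rchan_buf.  A __percpu array of pointers replaces a hand-rolled
 * "struct demo_buf *buf[NR_CPUS]" array.
 */
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>

struct demo_buf {
	unsigned int cpu;
};

struct demo_chan {
	struct demo_buf ** __percpu buf;	/* one pointer slot per CPU */
};

static int demo_chan_init(struct demo_chan *chan)
{
	unsigned int cpu;

	chan->buf = alloc_percpu(struct demo_buf *);
	if (!chan->buf)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct demo_buf *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			free_percpu(chan->buf);	/* per-buffer cleanup omitted */
			return -ENOMEM;
		}
		b->cpu = cpu;
		/* Slot of a specific CPU: per_cpu_ptr(ptr, cpu). */
		*per_cpu_ptr(chan->buf, cpu) = b;
	}
	return 0;
}

static struct demo_buf *demo_chan_local_buf(struct demo_chan *chan)
{
	/* Local CPU's slot, replacing chan->buf[smp_processor_id()]. */
	return *this_cpu_ptr(chan->buf);
}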

--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -19,6 +19,7 @@
 #include <linux/fs.h>
 #include <linux/poll.h>
 #include <linux/kref.h>
+#include <linux/percpu.h>
 
 /*
  * Tracks changes to rchan/rchan_buf structs
@@ -63,7 +64,7 @@ struct rchan
 	struct kref kref;		/* channel refcount */
 	void *private_data;		/* for user-defined data */
 	size_t last_toobig;		/* tried to log event > subbuf size */
-	struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
+	struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */
 	int is_global;			/* One global buffer ? */
 	struct list_head list;		/* for channel list */
 	struct dentry *parent;		/* parent dentry passed to open */
@@ -204,7 +205,7 @@ static inline void relay_write(struct rchan *chan,
 	struct rchan_buf *buf;
 
 	local_irq_save(flags);
-	buf = chan->buf[smp_processor_id()];
+	buf = *this_cpu_ptr(chan->buf);
 	if (unlikely(buf->offset + length > chan->subbuf_size))
 		length = relay_switch_subbuf(buf, length);
 	memcpy(buf->data + buf->offset, data, length);
@@ -230,12 +231,12 @@ static inline void __relay_write(struct rchan *chan,
 {
 	struct rchan_buf *buf;
 
-	buf = chan->buf[get_cpu()];
+	buf = *get_cpu_ptr(chan->buf);
 	if (unlikely(buf->offset + length > buf->chan->subbuf_size))
 		length = relay_switch_subbuf(buf, length);
 	memcpy(buf->data + buf->offset, data, length);
 	buf->offset += length;
-	put_cpu();
+	put_cpu_ptr(chan->buf);
 }
 
 /**
@@ -251,17 +252,19 @@ static inline void __relay_write(struct rchan *chan,
  */
 static inline void *relay_reserve(struct rchan *chan, size_t length)
 {
-	void *reserved;
-	struct rchan_buf *buf = chan->buf[smp_processor_id()];
+	void *reserved = NULL;
+	struct rchan_buf *buf = *get_cpu_ptr(chan->buf);
 
 	if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
 		length = relay_switch_subbuf(buf, length);
 		if (!length)
-			return NULL;
+			goto end;
 	}
 
 	reserved = buf->data + buf->offset;
 	buf->offset += length;
+end:
+	put_cpu_ptr(chan->buf);
 
 	return reserved;
 }

--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -214,7 +214,7 @@ static void relay_destroy_buf(struct rchan_buf *buf)
 			__free_page(buf->page_array[i]);
 		relay_free_page_array(buf->page_array);
 	}
-	chan->buf[buf->cpu] = NULL;
+	*per_cpu_ptr(chan->buf, buf->cpu) = NULL;
 	kfree(buf->padding);
 	kfree(buf);
 	kref_put(&chan->kref, relay_destroy_channel);
@@ -382,20 +382,21 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
  */
 void relay_reset(struct rchan *chan)
 {
+	struct rchan_buf *buf;
 	unsigned int i;
 
 	if (!chan)
 		return;
 
-	if (chan->is_global && chan->buf[0]) {
-		__relay_reset(chan->buf[0], 0);
+	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
+		__relay_reset(buf, 0);
 		return;
 	}
 
 	mutex_lock(&relay_channels_mutex);
 	for_each_possible_cpu(i)
-		if (chan->buf[i])
-			__relay_reset(chan->buf[i], 0);
+		if ((buf = *per_cpu_ptr(chan->buf, i)))
+			__relay_reset(buf, 0);
 	mutex_unlock(&relay_channels_mutex);
 }
 EXPORT_SYMBOL_GPL(relay_reset);
@@ -440,7 +441,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
 	struct dentry *dentry;
 
 	if (chan->is_global)
-		return chan->buf[0];
+		return *per_cpu_ptr(chan->buf, 0);
 
 	buf = relay_create_buf(chan);
 	if (!buf)
@@ -464,7 +465,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
 	__relay_reset(buf, 1);
 
 	if(chan->is_global) {
-		chan->buf[0] = buf;
+		*per_cpu_ptr(chan->buf, 0) = buf;
 		buf->cpu = 0;
 	}
 
@@ -526,22 +527,24 @@ static int relay_hotcpu_callback(struct notifier_block *nb,
 {
 	unsigned int hotcpu = (unsigned long)hcpu;
 	struct rchan *chan;
+	struct rchan_buf *buf;
 
 	switch(action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
 		mutex_lock(&relay_channels_mutex);
 		list_for_each_entry(chan, &relay_channels, list) {
-			if (chan->buf[hotcpu])
+			if ((buf = *per_cpu_ptr(chan->buf, hotcpu)))
 				continue;
-			chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
-			if(!chan->buf[hotcpu]) {
+			buf = relay_open_buf(chan, hotcpu);
+			if (!buf) {
 				printk(KERN_ERR
 					"relay_hotcpu_callback: cpu %d buffer "
 					"creation failed\n", hotcpu);
 				mutex_unlock(&relay_channels_mutex);
 				return notifier_from_errno(-ENOMEM);
 			}
+			*per_cpu_ptr(chan->buf, hotcpu) = buf;
 		}
 		mutex_unlock(&relay_channels_mutex);
 		break;
@@ -583,6 +586,7 @@ struct rchan *relay_open(const char *base_filename,
 {
 	unsigned int i;
 	struct rchan *chan;
+	struct rchan_buf *buf;
 
 	if (!(subbuf_size && n_subbufs))
 		return NULL;
@@ -593,6 +597,7 @@ struct rchan *relay_open(const char *base_filename,
 	if (!chan)
 		return NULL;
 
+	chan->buf = alloc_percpu(struct rchan_buf *);
 	chan->version = RELAYFS_CHANNEL_VERSION;
 	chan->n_subbufs = n_subbufs;
 	chan->subbuf_size = subbuf_size;
@@ -608,9 +613,10 @@ struct rchan *relay_open(const char *base_filename,
 
 	mutex_lock(&relay_channels_mutex);
 	for_each_online_cpu(i) {
-		chan->buf[i] = relay_open_buf(chan, i);
-		if (!chan->buf[i])
+		buf = relay_open_buf(chan, i);
+		if (!buf)
 			goto free_bufs;
+		*per_cpu_ptr(chan->buf, i) = buf;
 	}
 	list_add(&chan->list, &relay_channels);
 	mutex_unlock(&relay_channels_mutex);
@@ -619,8 +625,8 @@ struct rchan *relay_open(const char *base_filename,
 
 free_bufs:
 	for_each_possible_cpu(i) {
-		if (chan->buf[i])
-			relay_close_buf(chan->buf[i]);
+		if ((buf = *per_cpu_ptr(chan->buf, i)))
+			relay_close_buf(buf);
 	}
 
 	kref_put(&chan->kref, relay_destroy_channel);
@@ -666,6 +672,7 @@ int relay_late_setup_files(struct rchan *chan,
 	unsigned int i, curr_cpu;
 	unsigned long flags;
 	struct dentry *dentry;
+	struct rchan_buf *buf;
 	struct rchan_percpu_buf_dispatcher disp;
 
 	if (!chan || !base_filename)
@@ -684,10 +691,11 @@ int relay_late_setup_files(struct rchan *chan,
 
 	if (chan->is_global) {
 		err = -EINVAL;
-		if (!WARN_ON_ONCE(!chan->buf[0])) {
-			dentry = relay_create_buf_file(chan, chan->buf[0], 0);
+		buf = *per_cpu_ptr(chan->buf, 0);
+		if (!WARN_ON_ONCE(!buf)) {
+			dentry = relay_create_buf_file(chan, buf, 0);
 			if (dentry && !WARN_ON_ONCE(!chan->is_global)) {
-				relay_set_buf_dentry(chan->buf[0], dentry);
+				relay_set_buf_dentry(buf, dentry);
 				err = 0;
 			}
 		}
@@ -702,13 +710,14 @@ int relay_late_setup_files(struct rchan *chan,
 	 * on all currently online CPUs.
 	 */
 	for_each_online_cpu(i) {
-		if (unlikely(!chan->buf[i])) {
+		buf = *per_cpu_ptr(chan->buf, i);
+		if (unlikely(!buf)) {
 			WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
 			err = -EINVAL;
 			break;
 		}
 
-		dentry = relay_create_buf_file(chan, chan->buf[i], i);
+		dentry = relay_create_buf_file(chan, buf, i);
 		if (unlikely(!dentry)) {
 			err = -EINVAL;
 			break;
@@ -716,10 +725,10 @@ int relay_late_setup_files(struct rchan *chan,
 
 		if (curr_cpu == i) {
 			local_irq_save(flags);
-			relay_set_buf_dentry(chan->buf[i], dentry);
+			relay_set_buf_dentry(buf, dentry);
 			local_irq_restore(flags);
 		} else {
-			disp.buf = chan->buf[i];
+			disp.buf = buf;
 			disp.dentry = dentry;
 			smp_mb();
 			/* relay_channels_mutex must be held, so wait. */
@@ -822,11 +831,10 @@ void relay_subbufs_consumed(struct rchan *chan,
 	if (!chan)
 		return;
 
-	if (cpu >= NR_CPUS || !chan->buf[cpu] ||
-	    subbufs_consumed > chan->n_subbufs)
+	buf = *per_cpu_ptr(chan->buf, cpu);
+	if (cpu >= NR_CPUS || !buf || subbufs_consumed > chan->n_subbufs)
 		return;
 
-	buf = chan->buf[cpu];
 	if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
 		buf->subbufs_consumed = buf->subbufs_produced;
 	else
@@ -842,18 +850,19 @@ EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
  */
 void relay_close(struct rchan *chan)
 {
+	struct rchan_buf *buf;
 	unsigned int i;
 
 	if (!chan)
 		return;
 
 	mutex_lock(&relay_channels_mutex);
-	if (chan->is_global && chan->buf[0])
-		relay_close_buf(chan->buf[0]);
+	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0)))
+		relay_close_buf(buf);
 	else
 		for_each_possible_cpu(i)
-			if (chan->buf[i])
-				relay_close_buf(chan->buf[i]);
+			if ((buf = *per_cpu_ptr(chan->buf, i)))
+				relay_close_buf(buf);
 
 	if (chan->last_toobig)
 		printk(KERN_WARNING "relay: one or more items not logged "
@@ -874,20 +883,21 @@ EXPORT_SYMBOL_GPL(relay_close);
  */
 void relay_flush(struct rchan *chan)
 {
+	struct rchan_buf *buf;
 	unsigned int i;
 
 	if (!chan)
 		return;
 
-	if (chan->is_global && chan->buf[0]) {
-		relay_switch_subbuf(chan->buf[0], 0);
+	if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
+		relay_switch_subbuf(buf, 0);
 		return;
 	}
 
 	mutex_lock(&relay_channels_mutex);
 	for_each_possible_cpu(i)
-		if (chan->buf[i])
-			relay_switch_subbuf(chan->buf[i], 0);
+		if ((buf = *per_cpu_ptr(chan->buf, i)))
+			relay_switch_subbuf(buf, 0);
 	mutex_unlock(&relay_channels_mutex);
 }
 EXPORT_SYMBOL_GPL(relay_flush);
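
Usage note: on the writer fast path the patch pairs get_cpu_ptr() with put_cpu_ptr(), which disable and re-enable preemption around the access just as the old get_cpu()/put_cpu() did, so the task cannot migrate while it touches the local buffer. A hedged sketch of that pattern, reusing the hypothetical demo_chan/demo_buf types from the sketch above:

#include <linux/smp.h>

/*
 * Sketch of the preemption-safe fast path mirroring __relay_write() and
 * relay_reserve() after this change; demo_chan/demo_buf are the
 * hypothetical types from the earlier sketch.
 */
static void demo_chan_touch(struct demo_chan *chan)
{
	/* get_cpu_ptr() disables preemption, like the old get_cpu(). */
	struct demo_buf *buf = *get_cpu_ptr(chan->buf);

	buf->cpu = smp_processor_id();	/* no migration until... */

	/* ...put_cpu_ptr() re-enables preemption, like the old put_cpu(). */
	put_cpu_ptr(chan->buf);
}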