5a0e3ad6af
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h, making everything defined by the two files
universally available and complicating inclusion dependencies.

The percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities to include
those headers directly instead of assuming their availability. As this
conversion needs to touch a large number of source files, the
following script was used as the basis of conversion.

    http://userweb.kernel.org/~tj/misc/slabh-sweep.py

The script does the following (a sample edit is shown after the step
list below).

* Scan files for gfp and slab usages and update includes such that
  only the necessary includes are there, i.e. if only gfp is used,
  gfp.h; if slab is used, slab.h.

* When the script inserts a new include, it looks at the include
  blocks and tries to place the new include so that its order
  conforms to its surroundings. It's put in the include block which
  contains core kernel includes, in the same order that the rest are
  ordered - alphabetical, Christmas tree, rev-Xmas-tree, or at the
  end if there doesn't seem to be any matching order.

* If the script can't find a place to put a new include (mostly
  because the file doesn't have a fitting include block), it prints
  out an error message indicating which .h file needs to be added to
  the file.

The conversion was done in the following steps.

1. The initial automatic conversion of all .c files updated slightly
   over 4000 files, deleting around 700 includes and adding ~480
   gfp.h and ~3000 slab.h inclusions. The script emitted errors for
   ~400 files.

2. Each error was manually checked. Some didn't need the inclusion,
   some needed manual addition, and for others it was more
   appropriate to add the include to an implementation .h or
   embedding .c file. This step added inclusions to around 150 files.

3. The script was run again and the output was compared to the edits
   from #2 to make sure no file was left behind.

4. Several build tests were done and a couple of problems were fixed,
   e.g. lib/decompress_*.c used malloc/free() wrappers around slab
   APIs, requiring slab.h to be added manually.

5. The script was run on all .h files but without automatically
   editing them, as sprinkling gfp.h and slab.h inclusions around .h
   files could easily lead to inclusion dependency hell. Most gfp.h
   inclusion directives were ignored, as stuff from gfp.h was usually
   widely available and often used in preprocessor macros. Each
   slab.h inclusion directive was examined and added manually as
   necessary.

6. percpu.h was updated not to include slab.h.

7. Build tests were done on the following configurations and failures
   were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
   distributed build env didn't work with gcov compiles) and a few
   more options had to be turned off depending on archs to make
   things build (like ipr on powerpc/64, which failed due to missing
   writeq).

   * x86 and x86_64 UP and SMP allmodconfig and a custom test config
   * powerpc and powerpc64 SMP allmodconfig
   * sparc and sparc64 SMP allmodconfig
   * ia64 SMP allmodconfig
   * s390 SMP allmodconfig
   * alpha SMP allmodconfig
   * um on x86_64 SMP allmodconfig

8. The percpu.h modifications were reverted so that they could be
   applied as a separate patch and serve as a bisection point.

Given that I had only a couple of failures from the tests in step 7,
I'm fairly confident about the coverage of this conversion patch.
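As a concrete illustration (not part of the patch; the driver file
and its include block are hypothetical), an automatic edit of the
kind described above looks like this for a file that calls kzalloc()
but previously relied on percpu.h pulling in slab.h:

    --- a/drivers/foo/bar.c
    +++ b/drivers/foo/bar.c
     #include <linux/kernel.h>
     #include <linux/module.h>
     #include <linux/pci.h>
    +#include <linux/slab.h>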
If there is a breakage, it's likely to be something in one of the
arch headers, which should be easily discoverable on most builds of
the specific arch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
375 lines
9.0 KiB
C
/*
 * Memory mapped I/O tracing
 *
 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
 */

#define DEBUG 1

#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/time.h>

#include <asm/atomic.h>

#include "trace.h"
#include "trace_output.h"
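
/*
 * Tracks the PCI device currently being described while mmio_read()
 * streams one PCIDEV header line per device.
 */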
struct header_iter {
        struct pci_dev *dev;
};

static struct trace_array *mmio_trace_array;
static bool overrun_detected;
static unsigned long prev_overruns;
static atomic_t dropped_count;

static void mmio_reset_data(struct trace_array *tr)
{
        overrun_detected = false;
        prev_overruns = 0;

        tracing_reset_online_cpus(tr);
}

static int mmio_trace_init(struct trace_array *tr)
{
        pr_debug("in %s\n", __func__);
        mmio_trace_array = tr;

        mmio_reset_data(tr);
        enable_mmiotrace();
        return 0;
}

static void mmio_trace_reset(struct trace_array *tr)
{
        pr_debug("in %s\n", __func__);

        disable_mmiotrace();
        mmio_reset_data(tr);
        mmio_trace_array = NULL;
}

static void mmio_trace_start(struct trace_array *tr)
{
        pr_debug("in %s\n", __func__);
        mmio_reset_data(tr);
}
static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
        int ret = 0;
        int i;
        resource_size_t start, end;
        const struct pci_driver *drv = pci_dev_driver(dev);

        /* XXX: incomplete checks for trace_seq_printf() return value */
        ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
                                dev->bus->number, dev->devfn,
                                dev->vendor, dev->device, dev->irq);
        /*
         * XXX: is pci_resource_to_user() appropriate, since we are
         * supposed to interpret the __ioremap() phys_addr argument based on
         * these printed values?
         */
        for (i = 0; i < 7; i++) {
                pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
                ret += trace_seq_printf(s, " %llx",
                        (unsigned long long)(start |
                        (dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
        }
        for (i = 0; i < 7; i++) {
                pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
                ret += trace_seq_printf(s, " %llx",
                        dev->resource[i].start < dev->resource[i].end ?
                        (unsigned long long)(end - start) + 1 : 0);
        }
        if (drv)
                ret += trace_seq_printf(s, " %s\n", drv->name);
        else
                ret += trace_seq_printf(s, " \n");
        return ret;
}

static void destroy_header_iter(struct header_iter *hiter)
{
        if (!hiter)
                return;
        pci_dev_put(hiter->dev);
        kfree(hiter);
}

static void mmio_pipe_open(struct trace_iterator *iter)
{
        struct header_iter *hiter;
        struct trace_seq *s = &iter->seq;

        trace_seq_printf(s, "VERSION 20070824\n");

        hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
        if (!hiter)
                return;

        hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
        iter->private = hiter;
}

/* XXX: This is not called when the pipe is closed! */
static void mmio_close(struct trace_iterator *iter)
{
        struct header_iter *hiter = iter->private;
        destroy_header_iter(hiter);
        iter->private = NULL;
}
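
/*
 * Events lost since the last call: explicit drops counted when ring
 * buffer reservation failed, plus any new ring buffer overruns since
 * the previous check.
 */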
static unsigned long count_overruns(struct trace_iterator *iter)
{
        unsigned long cnt = atomic_xchg(&dropped_count, 0);
        unsigned long over = ring_buffer_overruns(iter->tr->buffer);

        if (over > prev_overruns)
                cnt += over - prev_overruns;
        prev_overruns = over;
        return cnt;
}
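
/*
 * Tracer read callback: emit a "MARK ... Lost N events" line if
 * events were dropped, otherwise print the next PCIDEV header line
 * prepared by mmio_pipe_open(). Returning 0 lets the core tracer
 * stream the trace data itself.
 */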
static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
                         char __user *ubuf, size_t cnt, loff_t *ppos)
{
        ssize_t ret;
        struct header_iter *hiter = iter->private;
        struct trace_seq *s = &iter->seq;
        unsigned long n;

        n = count_overruns(iter);
        if (n) {
                /* XXX: This is later than where events were lost. */
                trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
                if (!overrun_detected)
                        pr_warning("mmiotrace has lost events.\n");
                overrun_detected = true;
                goto print_out;
        }

        if (!hiter)
                return 0;

        mmio_print_pcidev(s, hiter->dev);
        hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);

        if (!hiter->dev) {
                destroy_header_iter(hiter);
                iter->private = NULL;
        }

print_out:
        ret = trace_seq_to_user(s, ubuf, cnt);
        return (ret == -EBUSY) ? 0 : ret;
}
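
/*
 * Render one MMIO access entry as a text line ("R", "W" or
 * "UNKNOWN") with width, timestamp (s.us), map id, physical address,
 * value and the program counter of the access.
 */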
static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_mmiotrace_rw *field;
        struct mmiotrace_rw *rw;
        struct trace_seq *s = &iter->seq;
        unsigned long long t = ns2usecs(iter->ts);
        unsigned long usec_rem = do_div(t, USEC_PER_SEC);
        unsigned secs = (unsigned long)t;
        int ret = 1;

        trace_assign_type(field, entry);
        rw = &field->rw;

        switch (rw->opcode) {
        case MMIO_READ:
                ret = trace_seq_printf(s,
                        "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
                        rw->width, secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        rw->value, rw->pc, 0);
                break;
        case MMIO_WRITE:
                ret = trace_seq_printf(s,
                        "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
                        rw->width, secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        rw->value, rw->pc, 0);
                break;
        case MMIO_UNKNOWN_OP:
                ret = trace_seq_printf(s,
                        "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,"
                        "%02lx 0x%lx %d\n",
                        secs, usec_rem, rw->map_id,
                        (unsigned long long)rw->phys,
                        (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
                        (rw->value >> 0) & 0xff, rw->pc, 0);
                break;
        default:
                ret = trace_seq_printf(s, "rw what?\n");
                break;
        }
        if (ret)
                return TRACE_TYPE_HANDLED;
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_mmiotrace_map *field;
        struct mmiotrace_map *m;
        struct trace_seq *s = &iter->seq;
        unsigned long long t = ns2usecs(iter->ts);
        unsigned long usec_rem = do_div(t, USEC_PER_SEC);
        unsigned secs = (unsigned long)t;
        int ret;

        trace_assign_type(field, entry);
        m = &field->map;

        switch (m->opcode) {
        case MMIO_PROBE:
                ret = trace_seq_printf(s,
                        "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
                        secs, usec_rem, m->map_id,
                        (unsigned long long)m->phys, m->virt, m->len,
                        0UL, 0);
                break;
        case MMIO_UNPROBE:
                ret = trace_seq_printf(s,
                        "UNMAP %u.%06lu %d 0x%lx %d\n",
                        secs, usec_rem, m->map_id, 0UL, 0);
                break;
        default:
                ret = trace_seq_printf(s, "map what?\n");
                break;
        }
        if (ret)
                return TRACE_TYPE_HANDLED;
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct print_entry *print = (struct print_entry *)entry;
        const char *msg = print->buf;
        struct trace_seq *s = &iter->seq;
        unsigned long long t = ns2usecs(iter->ts);
        unsigned long usec_rem = do_div(t, USEC_PER_SEC);
        unsigned secs = (unsigned long)t;
        int ret;

        /* The trailing newline must be in the message. */
        ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t mmio_print_line(struct trace_iterator *iter)
{
        switch (iter->ent->type) {
        case TRACE_MMIO_RW:
                return mmio_print_rw(iter);
        case TRACE_MMIO_MAP:
                return mmio_print_map(iter);
        case TRACE_PRINT:
                return mmio_print_mark(iter);
        default:
                return TRACE_TYPE_HANDLED; /* ignore unknown entries */
        }
}

static struct tracer mmio_tracer __read_mostly =
{
        .name           = "mmiotrace",
        .init           = mmio_trace_init,
        .reset          = mmio_trace_reset,
        .start          = mmio_trace_start,
        .pipe_open      = mmio_pipe_open,
        .close          = mmio_close,
        .read           = mmio_read,
        .print_line     = mmio_print_line,
};

__init static int init_mmio_trace(void)
{
        return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);
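
/*
 * Reserve space for one rw event in the ring buffer, copy the access
 * into it and commit. If the reservation fails, account the event in
 * dropped_count so a later read can report it as lost.
 */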
static void __trace_mmiotrace_rw(struct trace_array *tr,
                                 struct trace_array_cpu *data,
                                 struct mmiotrace_rw *rw)
{
        struct ftrace_event_call *call = &event_mmiotrace_rw;
        struct ring_buffer *buffer = tr->buffer;
        struct ring_buffer_event *event;
        struct trace_mmiotrace_rw *entry;
        int pc = preempt_count();

        event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
                                          sizeof(*entry), 0, pc);
        if (!event) {
                atomic_inc(&dropped_count);
                return;
        }
        entry = ring_buffer_event_data(event);
        entry->rw = *rw;

        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, pc);
}

void mmio_trace_rw(struct mmiotrace_rw *rw)
{
        struct trace_array *tr = mmio_trace_array;
        struct trace_array_cpu *data = tr->data[smp_processor_id()];
        __trace_mmiotrace_rw(tr, data, rw);
}

static void __trace_mmiotrace_map(struct trace_array *tr,
                                  struct trace_array_cpu *data,
                                  struct mmiotrace_map *map)
{
        struct ftrace_event_call *call = &event_mmiotrace_map;
        struct ring_buffer *buffer = tr->buffer;
        struct ring_buffer_event *event;
        struct trace_mmiotrace_map *entry;
        int pc = preempt_count();

        event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
                                          sizeof(*entry), 0, pc);
        if (!event) {
                atomic_inc(&dropped_count);
                return;
        }
        entry = ring_buffer_event_data(event);
        entry->map = *map;

        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, pc);
}
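
/*
 * Disable preemption so smp_processor_id() stays valid while the
 * per-cpu data pointer is taken.
 */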
void mmio_trace_mapping(struct mmiotrace_map *map)
{
        struct trace_array *tr = mmio_trace_array;
        struct trace_array_cpu *data;

        preempt_disable();
        data = tr->data[smp_processor_id()];
        __trace_mmiotrace_map(tr, data, map);
        preempt_enable();
}

int mmio_trace_printk(const char *fmt, va_list args)
{
        return trace_vprintk(0, fmt, args);
}