
Merge branch 'perf/core' into perf/timer, before applying new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Ingo Molnar <mingo@kernel.org>
Date:   2015-03-27 10:10:47 +01:00
Commit: b381e63b48

236 changed files with 7929 additions and 2412 deletions


@ -648,7 +648,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* Per-cpu breakpoints are not supported by our stepping
* mechanism.
*/
if (!bp->hw.bp_target)
if (!bp->hw.target)
return -EINVAL;
/*


@ -527,7 +527,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* Disallow per-task kernel breakpoints since these would
* complicate the stepping code.
*/
if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target)
if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
return -EINVAL;
return 0;


@ -124,7 +124,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_flush_branch_stack(void) {}
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */
@ -350,6 +350,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event)
cpuhw->bhrb_context = event->ctx;
}
cpuhw->bhrb_users++;
perf_sched_cb_inc(event->ctx->pmu);
}
static void power_pmu_bhrb_disable(struct perf_event *event)
@ -361,6 +362,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
cpuhw->bhrb_users--;
WARN_ON_ONCE(cpuhw->bhrb_users < 0);
perf_sched_cb_dec(event->ctx->pmu);
if (!cpuhw->disabled && !cpuhw->bhrb_users) {
/* BHRB cannot be turned off when other
@ -375,9 +377,12 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
/* Called from ctxsw to prevent one process's branch entries from
* mingling with the other process's entries during context switch.
*/
static void power_pmu_flush_branch_stack(void)
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
if (ppmu->bhrb_nr)
if (!ppmu->bhrb_nr)
return;
if (sched_in)
power_pmu_bhrb_reset();
}
/* Calculate the to address for a branch */
@ -1901,7 +1906,7 @@ static struct pmu power_pmu = {
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
.event_idx = power_pmu_event_idx,
.flush_branch_stack = power_pmu_flush_branch_stack,
.sched_task = power_pmu_sched_task,
};
/*

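The powerpc conversion above is the template for the rest of this merge: the old pmu->flush_branch_stack() hook goes away, the PMU implements a sched_task() callback instead, and it turns the per-cpu context-switch callback on and off with perf_sched_cb_inc()/perf_sched_cb_dec() while events that need it are live. A minimal sketch of the pattern (hypothetical example_pmu_* names, not code from this commit):

/* Sketch: a PMU driver opting in to context-switch callbacks. */
static void example_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
        /* Hardware branch history is stale across a task switch;
         * throw it away when the new task is scheduled in. */
        if (sched_in)
                example_pmu_reset_branch_state();
}

static void example_pmu_event_enable(struct perf_event *event)
{
        /* Ask the core to start invoking ->sched_task() on every
         * context switch on this CPU. */
        perf_sched_cb_inc(event->ctx->pmu);
}

static void example_pmu_event_disable(struct perf_event *event)
{
        perf_sched_cb_dec(event->ctx->pmu);
}

static struct pmu example_pmu = {
        /* ... */
        .sched_task = example_pmu_sched_task,
};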

@ -12,7 +12,7 @@
#include <asm/disabled-features.h>
#endif
#define NCAPINTS 11 /* N 32-bit words worth of info */
#define NCAPINTS 13 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
/*
@ -226,6 +226,7 @@
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
@ -242,6 +243,12 @@
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
/*
* BUG word(s)
*/


@ -109,6 +109,9 @@ struct cpuinfo_x86 {
/* in KB - valid for CPUS which support this call: */
int x86_cache_size;
int x86_cache_alignment; /* In bytes */
/* Cache QoS architectural values: */
int x86_cache_max_rmid; /* max index */
int x86_cache_occ_scale; /* scale to bytes */
int x86_power;
unsigned long loops_per_jiffy;
/* cpuid returned max cores value: */


@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
endif
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o perf_event_intel_cqm.o
obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \
perf_event_intel_uncore_snb.o \


@ -646,6 +646,30 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
c->x86_capability[10] = eax;
}
/* Additional Intel-defined flags: level 0x0000000F */
if (c->cpuid_level >= 0x0000000F) {
u32 eax, ebx, ecx, edx;
/* QoS sub-leaf, EAX=0Fh, ECX=0 */
cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
c->x86_capability[11] = edx;
if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
/* will be overridden if occupancy monitoring exists */
c->x86_cache_max_rmid = ebx;
/* QoS sub-leaf, EAX=0Fh, ECX=1 */
cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
c->x86_capability[12] = edx;
if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
c->x86_cache_max_rmid = ecx;
c->x86_cache_occ_scale = ebx;
}
} else {
c->x86_cache_max_rmid = -1;
c->x86_cache_occ_scale = -1;
}
}
/* AMD-defined flags: level 0x80000001 */
xlvl = cpuid_eax(0x80000000);
c->extended_cpuid_level = xlvl;
@ -834,6 +858,20 @@ static void generic_identify(struct cpuinfo_x86 *c)
detect_nopl(c);
}
static void x86_init_cache_qos(struct cpuinfo_x86 *c)
{
/*
* The heavy lifting of max_rmid and cache_occ_scale is handled
* in get_cpu_cap(). Here we just set the max_rmid for the boot_cpu
* in case CQM bits really aren't there in this CPU.
*/
if (c != &boot_cpu_data) {
boot_cpu_data.x86_cache_max_rmid =
min(boot_cpu_data.x86_cache_max_rmid,
c->x86_cache_max_rmid);
}
}
/*
* This does the hard work of actually picking apart the CPU stuff...
*/
@ -923,6 +961,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
init_hypervisor(c);
x86_init_rdrand(c);
x86_init_cache_qos(c);
/*
* Clear/Set all flags overridden by options; need to do it

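The get_cpu_cap() enumeration above follows the CPUID layout for Cache QoS Monitoring: leaf 0xF, sub-leaf 0 reports LLC QoS support in EDX bit 1 and a max RMID in EBX; sub-leaf 1 reports occupancy monitoring in EDX bit 0, the occupancy scale (bytes per count) in EBX, and the real max RMID in ECX. The same leaves can be read from user space; a hedged sketch using GCC/clang's <cpuid.h> (illustration only, not part of this commit):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* CPUID.(EAX=0xF, ECX=0): EDX bit 1 => LLC QoS monitoring */
        if (!__get_cpuid_count(0x0F, 0, &eax, &ebx, &ecx, &edx) ||
            !(edx & (1u << 1))) {
                puts("CQM/LLC QoS monitoring not supported");
                return 0;
        }

        /* CPUID.(EAX=0xF, ECX=1): EDX bit 0 => occupancy monitoring,
         * ECX = max RMID, EBX = occupancy scale in bytes */
        __get_cpuid_count(0x0F, 1, &eax, &ebx, &ecx, &edx);
        if (edx & 1u)
                printf("max RMID = %u, occupancy scale = %u bytes\n", ecx, ebx);
        return 0;
}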

@ -399,39 +399,41 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
/*
* check that PEBS LBR correction does not conflict with
* whatever the user is asking with attr->branch_sample_type
*/
if (event->attr.precise_ip > 1 &&
x86_pmu.intel_cap.pebs_format < 2) {
u64 *br_type = &event->attr.branch_sample_type;
}
/*
* check that PEBS LBR correction does not conflict with
* whatever the user is asking with attr->branch_sample_type
*/
if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
u64 *br_type = &event->attr.branch_sample_type;
if (has_branch_stack(event)) {
if (!precise_br_compat(event))
return -EOPNOTSUPP;
if (has_branch_stack(event)) {
if (!precise_br_compat(event))
return -EOPNOTSUPP;
/* branch_sample_type is compatible */
/* branch_sample_type is compatible */
} else {
/*
* user did not specify branch_sample_type
*
* For PEBS fixups, we capture all
* the branches at the priv level of the
* event.
*/
*br_type = PERF_SAMPLE_BRANCH_ANY;
} else {
/*
* user did not specify branch_sample_type
*
* For PEBS fixups, we capture all
* the branches at the priv level of the
* event.
*/
*br_type = PERF_SAMPLE_BRANCH_ANY;
if (!event->attr.exclude_user)
*br_type |= PERF_SAMPLE_BRANCH_USER;
if (!event->attr.exclude_user)
*br_type |= PERF_SAMPLE_BRANCH_USER;
if (!event->attr.exclude_kernel)
*br_type |= PERF_SAMPLE_BRANCH_KERNEL;
}
if (!event->attr.exclude_kernel)
*br_type |= PERF_SAMPLE_BRANCH_KERNEL;
}
}
if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK)
event->attach_state |= PERF_ATTACH_TASK_DATA;
/*
* Generate PMC IRQs:
* (keep 'enabled' bit clear for now)
@ -449,6 +451,12 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.type == PERF_TYPE_RAW)
event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
if (event->attr.sample_period && x86_pmu.limit_period) {
if (x86_pmu.limit_period(event, event->attr.sample_period) >
event->attr.sample_period)
return -EINVAL;
}
return x86_setup_perfctr(event);
}
@ -986,6 +994,9 @@ int x86_perf_event_set_period(struct perf_event *event)
if (left > x86_pmu.max_period)
left = x86_pmu.max_period;
if (x86_pmu.limit_period)
left = x86_pmu.limit_period(event, left);
per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
/*
@ -1033,7 +1044,6 @@ static int x86_pmu_add(struct perf_event *event, int flags)
hwc = &event->hw;
perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
ret = n = collect_events(cpuc, event, false);
if (ret < 0)
@ -1071,7 +1081,6 @@ done_collect:
ret = 0;
out:
perf_pmu_enable(event->pmu);
return ret;
}
@ -1914,10 +1923,10 @@ static const struct attribute_group *x86_pmu_attr_groups[] = {
NULL,
};
static void x86_pmu_flush_branch_stack(void)
static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
if (x86_pmu.flush_branch_stack)
x86_pmu.flush_branch_stack();
if (x86_pmu.sched_task)
x86_pmu.sched_task(ctx, sched_in);
}
void perf_check_microcode(void)
@ -1949,7 +1958,8 @@ static struct pmu pmu = {
.commit_txn = x86_pmu_commit_txn,
.event_idx = x86_pmu_event_idx,
.flush_branch_stack = x86_pmu_flush_branch_stack,
.sched_task = x86_pmu_sched_task,
.task_ctx_size = sizeof(struct x86_perf_task_context),
};
void arch_perf_update_userpage(struct perf_event *event,


@ -451,6 +451,7 @@ struct x86_pmu {
struct x86_pmu_quirk *quirks;
int perfctr_second_write;
bool late_ack;
unsigned (*limit_period)(struct perf_event *event, unsigned l);
/*
* sysfs attrs
@ -472,7 +473,8 @@ struct x86_pmu {
void (*cpu_dead)(int cpu);
void (*check_microcode)(void);
void (*flush_branch_stack)(void);
void (*sched_task)(struct perf_event_context *ctx,
bool sched_in);
/*
* Intel Arch Perfmon v2+
@ -515,6 +517,13 @@ struct x86_pmu {
struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};
struct x86_perf_task_context {
u64 lbr_from[MAX_LBR_ENTRIES];
u64 lbr_to[MAX_LBR_ENTRIES];
int lbr_callstack_users;
int lbr_stack_state;
};
#define x86_add_quirk(func_) \
do { \
static struct x86_pmu_quirk __quirk __initdata = { \
@ -546,6 +555,12 @@ static struct perf_pmu_events_attr event_attr_##v = { \
extern struct x86_pmu x86_pmu __read_mostly;
static inline bool x86_pmu_has_lbr_callstack(void)
{
return x86_pmu.lbr_sel_map &&
x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}
DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
int x86_perf_event_set_period(struct perf_event *event);
@ -727,6 +742,8 @@ void intel_pmu_pebs_disable_all(void);
void intel_ds_init(void);
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
void intel_pmu_lbr_reset(void);
void intel_pmu_lbr_enable(struct perf_event *event);
@ -747,6 +764,8 @@ void intel_pmu_lbr_init_atom(void);
void intel_pmu_lbr_init_snb(void);
void intel_pmu_lbr_init_hsw(void);
int intel_pmu_setup_lbr_filter(struct perf_event *event);
int p4_pmu_init(void);


@ -796,7 +796,7 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
* the IBS interrupt vector is handled by perf_ibs_cpu_notifier that
* is using the new offset.
*/
static int force_ibs_eilvt_setup(void)
static void force_ibs_eilvt_setup(void)
{
int offset;
int ret;
@ -811,26 +811,24 @@ static int force_ibs_eilvt_setup(void)
if (offset == APIC_EILVT_NR_MAX) {
printk(KERN_DEBUG "No EILVT entry available\n");
return -EBUSY;
return;
}
ret = setup_ibs_ctl(offset);
if (ret)
goto out;
if (!ibs_eilvt_valid()) {
ret = -EFAULT;
if (!ibs_eilvt_valid())
goto out;
}
pr_info("IBS: LVT offset %d assigned\n", offset);
return 0;
return;
out:
preempt_disable();
put_eilvt(offset);
preempt_enable();
return ret;
return;
}
static void ibs_eilvt_setup(void)


@ -220,6 +220,15 @@ static struct event_constraint intel_hsw_event_constraints[] = {
EVENT_CONSTRAINT_END
};
struct event_constraint intel_bdw_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */
EVENT_CONSTRAINT_END
};
static u64 intel_pmu_event_map(int hw_event)
{
return intel_perfmon_event_map[hw_event];
@ -415,6 +424,202 @@ static __initconst const u64 snb_hw_cache_event_ids
};
/*
* Notes on the events:
* - data reads do not include code reads (comparable to earlier tables)
* - data counts include speculative execution (except L1 write, dtlb, bpu)
* - remote node access includes remote memory, remote cache, remote mmio.
* - prefetches are not included in the counts because they are not
* reliably counted.
*/
#define HSW_DEMAND_DATA_RD BIT_ULL(0)
#define HSW_DEMAND_RFO BIT_ULL(1)
#define HSW_ANY_RESPONSE BIT_ULL(16)
#define HSW_SUPPLIER_NONE BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0 BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1 BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P BIT_ULL(29)
#define HSW_L3_MISS (HSW_L3_MISS_LOCAL_DRAM| \
HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED BIT_ULL(32)
#define HSW_SNOOP_MISS BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD BIT_ULL(35)
#define HSW_SNOOP_HITM BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM BIT_ULL(37)
#define HSW_ANY_SNOOP (HSW_SNOOP_NONE| \
HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE (HSW_L3_MISS_REMOTE_HOP0|\
HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS HSW_ANY_RESPONSE
#define BDW_L3_MISS_LOCAL BIT(26)
#define BDW_L3_MISS (BDW_L3_MISS_LOCAL| \
HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
HSW_L3_MISS_REMOTE_HOP2P)
static __initconst const u64 hsw_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
[ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
[ C(RESULT_MISS) ] = 0x0,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(L1I ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
[ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
[ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
[ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
[ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(NODE) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
};
static __initconst const u64 hsw_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
HSW_LLC_ACCESS,
[ C(RESULT_MISS) ] = HSW_DEMAND_READ|
HSW_L3_MISS|HSW_ANY_SNOOP,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
HSW_LLC_ACCESS,
[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
HSW_L3_MISS|HSW_ANY_SNOOP,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(NODE) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
HSW_L3_MISS_LOCAL_DRAM|
HSW_SNOOP_DRAM,
[ C(RESULT_MISS) ] = HSW_DEMAND_READ|
HSW_L3_MISS_REMOTE|
HSW_SNOOP_DRAM,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
HSW_L3_MISS_LOCAL_DRAM|
HSW_SNOOP_DRAM,
[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
HSW_L3_MISS_REMOTE|
HSW_SNOOP_DRAM,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
};
static __initconst const u64 westmere_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@ -1029,20 +1234,6 @@ static __initconst const u64 slm_hw_cache_event_ids
},
};
static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
/* user explicitly requested branch sampling */
if (has_branch_stack(event))
return true;
/* implicit branch sampling to correct PEBS skid */
if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
x86_pmu.intel_cap.pebs_format < 2)
return true;
return false;
}
static void intel_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@ -1207,7 +1398,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
* must disable before any actual event
* because any event may be combined with LBR
*/
if (intel_pmu_needs_lbr_smpl(event))
if (needs_branch_stack(event))
intel_pmu_lbr_disable(event);
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@ -1268,7 +1459,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
* must enable before any actual event
* because any event may be combined with LBR
*/
if (intel_pmu_needs_lbr_smpl(event))
if (needs_branch_stack(event))
intel_pmu_lbr_enable(event);
if (event->attr.exclude_host)
@ -1747,7 +1938,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (event->attr.precise_ip && x86_pmu.pebs_aliases)
x86_pmu.pebs_aliases(event);
if (intel_pmu_needs_lbr_smpl(event)) {
if (needs_branch_stack(event)) {
ret = intel_pmu_setup_lbr_filter(event);
if (ret)
return ret;
@ -1905,6 +2096,32 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
return c;
}
/*
* Broadwell:
*
* The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
* (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
* the two to enforce a minimum period of 128 (the smallest value that has bits
* 0-5 cleared and >= 100).
*
* Because of how the code in x86_perf_event_set_period() works, the truncation
* of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
* to make up for the 'lost' events due to carrying the 'error' in period_left.
*
* Therefore the effective (average) period matches the requested period,
* despite coarser hardware granularity.
*/
static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
{
if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
X86_CONFIG(.event=0xc0, .umask=0x01)) {
if (left < 128)
left = 128;
left &= ~0x3fu;
}
return left;
}
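The effect of bdw_limit_period() is easy to see with concrete numbers: a requested INST_RETIRED.ALL period is first raised to at least 128, then truncated to a multiple of 64 by clearing bits 0-5. A standalone illustration of the arithmetic (not kernel code):

#include <stdio.h>

static unsigned bdw_limit(unsigned left)
{
        if (left < 128)
                left = 128;
        left &= ~0x3fu;
        return left;
}

int main(void)
{
        unsigned periods[] = { 100, 128, 150, 200, 1000003 };

        /* prints 100->128, 128->128, 150->128, 200->192, 1000003->1000000 */
        for (int i = 0; i < 5; i++)
                printf("%u -> %u\n", periods[i], bdw_limit(periods[i]));
        return 0;
}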
PMU_FORMAT_ATTR(event, "config:0-7" );
PMU_FORMAT_ATTR(umask, "config:8-15" );
PMU_FORMAT_ATTR(edge, "config:18" );
@ -2044,18 +2261,6 @@ static void intel_pmu_cpu_dying(int cpu)
fini_debug_store_on_cpu(cpu);
}
static void intel_pmu_flush_branch_stack(void)
{
/*
* Intel LBR does not tag entries with the
* PID of the current task, then we need to
* flush it on ctxsw
* For now, we simply reset it
*/
if (x86_pmu.lbr_nr)
intel_pmu_lbr_reset();
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@ -2107,7 +2312,7 @@ static __initconst const struct x86_pmu intel_pmu = {
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
.guest_get_msrs = intel_guest_get_msrs,
.flush_branch_stack = intel_pmu_flush_branch_stack,
.sched_task = intel_pmu_lbr_sched_task,
};
static __init void intel_clovertown_quirk(void)
@ -2546,10 +2751,10 @@ __init int intel_pmu_init(void)
case 69: /* 22nm Haswell ULT */
case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_snb();
intel_pmu_lbr_init_hsw();
x86_pmu.event_constraints = intel_hsw_event_constraints;
x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
@ -2566,6 +2771,39 @@ __init int intel_pmu_init(void)
pr_cont("Haswell events, ");
break;
case 61: /* 14nm Broadwell Core-M */
case 86: /* 14nm Broadwell Xeon D */
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
/* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
BDW_L3_MISS|HSW_SNOOP_DRAM;
hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
HSW_SNOOP_DRAM;
hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
intel_pmu_lbr_init_snb();
x86_pmu.event_constraints = intel_bdw_event_constraints;
x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
x86_pmu.extra_regs = intel_snbep_extra_regs;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
x86_pmu.er_flags |= ERF_NO_HT_SHARING;
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
x86_pmu.cpu_events = hsw_events_attrs;
x86_pmu.limit_period = bdw_limit_period;
pr_cont("Broadwell events, ");
break;
default:
switch (x86_pmu.version) {
case 1:

[File diff suppressed because it is too large]


@ -39,6 +39,7 @@ static enum {
#define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */
#define LBR_FAR_BIT 8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT 9 /* enable call stack */
#define LBR_KERNEL (1 << LBR_KERNEL_BIT)
#define LBR_USER (1 << LBR_USER_BIT)
@ -49,6 +50,7 @@ static enum {
#define LBR_REL_JMP (1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP (1 << LBR_IND_JMP_BIT)
#define LBR_FAR (1 << LBR_FAR_BIT)
#define LBR_CALL_STACK (1 << LBR_CALL_STACK_BIT)
#define LBR_PLM (LBR_KERNEL | LBR_USER)
@ -69,33 +71,31 @@ static enum {
#define LBR_FROM_FLAG_IN_TX (1ULL << 62)
#define LBR_FROM_FLAG_ABORT (1ULL << 61)
#define for_each_branch_sample_type(x) \
for ((x) = PERF_SAMPLE_BRANCH_USER; \
(x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1)
/*
* x86 control flow change classification
* x86 control flow changes include branches, interrupts, traps, faults
*/
enum {
X86_BR_NONE = 0, /* unknown */
X86_BR_NONE = 0, /* unknown */
X86_BR_USER = 1 << 0, /* branch target is user */
X86_BR_KERNEL = 1 << 1, /* branch target is kernel */
X86_BR_USER = 1 << 0, /* branch target is user */
X86_BR_KERNEL = 1 << 1, /* branch target is kernel */
X86_BR_CALL = 1 << 2, /* call */
X86_BR_RET = 1 << 3, /* return */
X86_BR_SYSCALL = 1 << 4, /* syscall */
X86_BR_SYSRET = 1 << 5, /* syscall return */
X86_BR_INT = 1 << 6, /* sw interrupt */
X86_BR_IRET = 1 << 7, /* return from interrupt */
X86_BR_JCC = 1 << 8, /* conditional */
X86_BR_JMP = 1 << 9, /* jump */
X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */
X86_BR_IND_CALL = 1 << 11,/* indirect calls */
X86_BR_ABORT = 1 << 12,/* transaction abort */
X86_BR_IN_TX = 1 << 13,/* in transaction */
X86_BR_NO_TX = 1 << 14,/* not in transaction */
X86_BR_CALL = 1 << 2, /* call */
X86_BR_RET = 1 << 3, /* return */
X86_BR_SYSCALL = 1 << 4, /* syscall */
X86_BR_SYSRET = 1 << 5, /* syscall return */
X86_BR_INT = 1 << 6, /* sw interrupt */
X86_BR_IRET = 1 << 7, /* return from interrupt */
X86_BR_JCC = 1 << 8, /* conditional */
X86_BR_JMP = 1 << 9, /* jump */
X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */
X86_BR_IND_CALL = 1 << 11,/* indirect calls */
X86_BR_ABORT = 1 << 12,/* transaction abort */
X86_BR_IN_TX = 1 << 13,/* in transaction */
X86_BR_NO_TX = 1 << 14,/* not in transaction */
X86_BR_ZERO_CALL = 1 << 15,/* zero length call */
X86_BR_CALL_STACK = 1 << 16,/* call stack */
};
#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
@ -112,13 +112,15 @@ enum {
X86_BR_JMP |\
X86_BR_IRQ |\
X86_BR_ABORT |\
X86_BR_IND_CALL)
X86_BR_IND_CALL |\
X86_BR_ZERO_CALL)
#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
#define X86_BR_ANY_CALL \
(X86_BR_CALL |\
X86_BR_IND_CALL |\
X86_BR_ZERO_CALL |\
X86_BR_SYSCALL |\
X86_BR_IRQ |\
X86_BR_INT)
@ -132,14 +134,23 @@ static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
static void __intel_pmu_lbr_enable(void)
{
u64 debugctl;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
u64 debugctl, lbr_select = 0;
if (cpuc->lbr_sel)
wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);
if (cpuc->lbr_sel) {
lbr_select = cpuc->lbr_sel->config;
wrmsrl(MSR_LBR_SELECT, lbr_select);
}
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
debugctl |= DEBUGCTLMSR_LBR;
/*
* LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
* If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
* may cause superfluous increase/decrease of LBR_TOS.
*/
if (!(lbr_select & LBR_CALL_STACK))
debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
@ -181,9 +192,116 @@ void intel_pmu_lbr_reset(void)
intel_pmu_lbr_reset_64();
}
/*
* TOS = most recently recorded branch
*/
static inline u64 intel_pmu_lbr_tos(void)
{
u64 tos;
rdmsrl(x86_pmu.lbr_tos, tos);
return tos;
}
enum {
LBR_NONE,
LBR_VALID,
};
static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
int i;
unsigned lbr_idx, mask;
u64 tos;
if (task_ctx->lbr_callstack_users == 0 ||
task_ctx->lbr_stack_state == LBR_NONE) {
intel_pmu_lbr_reset();
return;
}
mask = x86_pmu.lbr_nr - 1;
tos = intel_pmu_lbr_tos();
for (i = 0; i < x86_pmu.lbr_nr; i++) {
lbr_idx = (tos - i) & mask;
wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
}
task_ctx->lbr_stack_state = LBR_NONE;
}
static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
int i;
unsigned lbr_idx, mask;
u64 tos;
if (task_ctx->lbr_callstack_users == 0) {
task_ctx->lbr_stack_state = LBR_NONE;
return;
}
mask = x86_pmu.lbr_nr - 1;
tos = intel_pmu_lbr_tos();
for (i = 0; i < x86_pmu.lbr_nr; i++) {
lbr_idx = (tos - i) & mask;
rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
}
task_ctx->lbr_stack_state = LBR_VALID;
}
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct x86_perf_task_context *task_ctx;
if (!x86_pmu.lbr_nr)
return;
/*
* If LBR callstack feature is enabled and the stack was saved when
* the task was scheduled out, restore the stack. Otherwise flush
* the LBR stack.
*/
task_ctx = ctx ? ctx->task_ctx_data : NULL;
if (task_ctx) {
if (sched_in) {
__intel_pmu_lbr_restore(task_ctx);
cpuc->lbr_context = ctx;
} else {
__intel_pmu_lbr_save(task_ctx);
}
return;
}
/*
* When sampling the branch stack in system-wide mode, it may be
* necessary to flush the stack on context switch. This happens
* when the branch stack does not tag its entries with the pid
* of the current task. Otherwise it becomes impossible to
* associate a branch entry with a task. This ambiguity is more
* likely to appear when the branch stack supports priv level
* filtering and the user sets it to monitor only at the user
* level (which could be a useful measurement in system-wide
* mode). In that case, the risk is high of having a branch
* stack with branch from multiple tasks.
*/
if (sched_in) {
intel_pmu_lbr_reset();
cpuc->lbr_context = ctx;
}
}
static inline bool branch_user_callstack(unsigned br_sel)
{
return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}
void intel_pmu_lbr_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct x86_perf_task_context *task_ctx;
if (!x86_pmu.lbr_nr)
return;
@ -198,18 +316,33 @@ void intel_pmu_lbr_enable(struct perf_event *event)
}
cpuc->br_sel = event->hw.branch_reg.reg;
if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
event->ctx->task_ctx_data) {
task_ctx = event->ctx->task_ctx_data;
task_ctx->lbr_callstack_users++;
}
cpuc->lbr_users++;
perf_sched_cb_inc(event->ctx->pmu);
}
void intel_pmu_lbr_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct x86_perf_task_context *task_ctx;
if (!x86_pmu.lbr_nr)
return;
if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
event->ctx->task_ctx_data) {
task_ctx = event->ctx->task_ctx_data;
task_ctx->lbr_callstack_users--;
}
cpuc->lbr_users--;
WARN_ON_ONCE(cpuc->lbr_users < 0);
perf_sched_cb_dec(event->ctx->pmu);
if (cpuc->enabled && !cpuc->lbr_users) {
__intel_pmu_lbr_disable();
@ -234,18 +367,6 @@ void intel_pmu_lbr_disable_all(void)
__intel_pmu_lbr_disable();
}
/*
* TOS = most recently recorded branch
*/
static inline u64 intel_pmu_lbr_tos(void)
{
u64 tos;
rdmsrl(x86_pmu.lbr_tos, tos);
return tos;
}
static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
unsigned long mask = x86_pmu.lbr_nr - 1;
@ -350,7 +471,7 @@ void intel_pmu_lbr_read(void)
* - in case there is no HW filter
* - in case the HW filter has errata or limitations
*/
static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
u64 br_type = event->attr.branch_sample_type;
int mask = 0;
@ -387,11 +508,21 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
if (br_type & PERF_SAMPLE_BRANCH_COND)
mask |= X86_BR_JCC;
if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
if (!x86_pmu_has_lbr_callstack())
return -EOPNOTSUPP;
if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
return -EINVAL;
mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
X86_BR_CALL_STACK;
}
/*
* stash actual user request into reg, it may
* be used by fixup code for some CPU
*/
event->hw.branch_reg.reg = mask;
return 0;
}
/*
@ -403,14 +534,14 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
struct hw_perf_event_extra *reg;
u64 br_type = event->attr.branch_sample_type;
u64 mask = 0, m;
u64 v;
u64 mask = 0, v;
int i;
for_each_branch_sample_type(m) {
if (!(br_type & m))
for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
if (!(br_type & (1ULL << i)))
continue;
v = x86_pmu.lbr_sel_map[m];
v = x86_pmu.lbr_sel_map[i];
if (v == LBR_NOT_SUPP)
return -EOPNOTSUPP;
@ -420,8 +551,12 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
reg = &event->hw.branch_reg;
reg->idx = EXTRA_REG_LBR;
/* LBR_SELECT operates in suppress mode so invert mask */
reg->config = ~mask & x86_pmu.lbr_sel_mask;
/*
* The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
* in suppress mode. So LBR_SELECT should be set to
* (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
*/
reg->config = mask ^ x86_pmu.lbr_sel_mask;
return 0;
}
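The XOR above is doing two jobs at once: bits 0-8 of MSR_LBR_SELECT are suppress bits (set means "do not capture"), while the new call-stack enable sits outside LBR_SEL_MASK and is a positive enable. XOR with LBR_SEL_MASK therefore inverts only the suppress bits and passes the call-stack bit through, which is exactly (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK). A worked example (bit positions follow the kernel's LBR definitions; LBR_SEL_MASK assumed to be 0x1ff, covering bits 0-8):

#include <stdio.h>

#define LBR_KERNEL      (1u << 0)       /* suppress-mode bit: CPL == 0 */
#define LBR_REL_CALL    (1u << 3)
#define LBR_IND_CALL    (1u << 4)
#define LBR_RETURN      (1u << 5)
#define LBR_CALL_STACK  (1u << 9)       /* positive enable, outside the mask */
#define LBR_SEL_MASK    0x1ffu          /* suppress-mode bits 0-8 */

int main(void)
{
        /* kernel-only call-stack mode, as built from the lbr_sel_map */
        unsigned mask = LBR_KERNEL | LBR_REL_CALL | LBR_IND_CALL |
                        LBR_RETURN | LBR_CALL_STACK;
        unsigned config = mask ^ LBR_SEL_MASK;

        /* prints mask = 0x239, config = 0x3c6: requested branch types end
         * up with their suppress bits clear, everything else suppressed,
         * and the call-stack enable still set */
        printf("mask = 0x%x, config = 0x%x\n", mask, config);
        return 0;
}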
@ -439,7 +574,9 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event)
/*
* setup SW LBR filter
*/
intel_pmu_setup_sw_lbr_filter(event);
ret = intel_pmu_setup_sw_lbr_filter(event);
if (ret)
return ret;
/*
* setup HW LBR filter, if any
@ -568,6 +705,12 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
ret = X86_BR_INT;
break;
case 0xe8: /* call near rel */
insn_get_immediate(&insn);
if (insn.immediate1.value == 0) {
/* zero length call */
ret = X86_BR_ZERO_CALL;
break;
}
case 0x9a: /* call far absolute */
ret = X86_BR_CALL;
break;
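For context on the new 0xe8 case: a "zero-length call" is a near call whose immediate displacement is 0, i.e. a call to the very next instruction. It pushes the instruction pointer without any matching return, which confuses call/return pairing (and hence LBR call-stack mode), so it is classified as X86_BR_ZERO_CALL rather than a real call. The classic PC-read idiom that generates one (GCC inline asm, for illustration only, not kernel code):

/* Reads the current instruction pointer via a zero-length call. */
static void *read_pc(void)
{
        void *pc;

        asm volatile("call 1f\n\t"      /* target is the next insn: disp == 0 */
                     "1: pop %0"        /* return address == address of label 1 */
                     : "=r" (pc));
        return pc;
}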
@ -678,35 +821,49 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
/*
* Map interface branch filters onto LBR filters
*/
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
[PERF_SAMPLE_BRANCH_ANY] = LBR_ANY,
[PERF_SAMPLE_BRANCH_USER] = LBR_USER,
[PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL,
[PERF_SAMPLE_BRANCH_HV] = LBR_IGN,
[PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_REL_JMP
| LBR_IND_JMP | LBR_FAR,
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
[PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
[PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
[PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
[PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_REL_JMP
| LBR_IND_JMP | LBR_FAR,
/*
* NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
*/
[PERF_SAMPLE_BRANCH_ANY_CALL] =
[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
/*
* NHM/WSM erratum: must include IND_JMP to capture IND_CALL
*/
[PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP,
[PERF_SAMPLE_BRANCH_COND] = LBR_JCC,
[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
[PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
};
static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
[PERF_SAMPLE_BRANCH_ANY] = LBR_ANY,
[PERF_SAMPLE_BRANCH_USER] = LBR_USER,
[PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL,
[PERF_SAMPLE_BRANCH_HV] = LBR_IGN,
[PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_FAR,
[PERF_SAMPLE_BRANCH_ANY_CALL] = LBR_REL_CALL | LBR_IND_CALL
| LBR_FAR,
[PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL,
[PERF_SAMPLE_BRANCH_COND] = LBR_JCC,
static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
[PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
[PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
[PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
[PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
| LBR_FAR,
[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
[PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
};
static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
[PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_ANY,
[PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_USER,
[PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_KERNEL,
[PERF_SAMPLE_BRANCH_HV_SHIFT] = LBR_IGN,
[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_RETURN | LBR_FAR,
[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
| LBR_FAR,
[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL,
[PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_JCC,
[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_REL_CALL | LBR_IND_CALL
| LBR_RETURN | LBR_CALL_STACK,
};
/* core */
@ -765,6 +922,20 @@ void __init intel_pmu_lbr_init_snb(void)
pr_cont("16-deep LBR, ");
}
/* haswell */
void intel_pmu_lbr_init_hsw(void)
{
x86_pmu.lbr_nr = 16;
x86_pmu.lbr_tos = MSR_LBR_TOS;
x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
x86_pmu.lbr_to = MSR_LBR_NHM_TO;
x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
pr_cont("16-deep LBR, ");
}
/* atom */
void __init intel_pmu_lbr_init_atom(void)
{

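The __intel_pmu_lbr_save()/__intel_pmu_lbr_restore() pair above treats the LBR registers as a ring buffer indexed relative to the top-of-stack (TOS) pointer: (tos - i) & mask maps logical index 0 to the newest entry and wraps cleanly because the LBR depth is a power of two. A standalone model of that indexing (illustrative only; 16-deep ring as on Haswell):

#include <stdio.h>

#define LBR_NR 16u

int main(void)
{
        unsigned mask = LBR_NR - 1;     /* valid because LBR_NR is a power of 2 */
        unsigned tos = 3;               /* hardware TOS: slot of the newest entry */

        /* i = 0 reads the newest branch, i = 1 the one before it, and the
         * unsigned subtraction wraps past slot 0 thanks to the mask */
        for (unsigned i = 0; i < LBR_NR; i++)
                printf("logical %2u -> MSR slot %2u\n", i, (tos - i) & mask);
        return 0;
}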

@ -1132,8 +1132,7 @@ static int snbep_pci2phy_map_init(int devid)
}
}
if (ubox_dev)
pci_dev_put(ubox_dev);
pci_dev_put(ubox_dev);
return err ? pcibios_err_to_errno(err) : 0;
}


@ -354,6 +354,7 @@ int __copy_instruction(u8 *dest, u8 *src)
{
struct insn insn;
kprobe_opcode_t buf[MAX_INSN_SIZE];
int length;
unsigned long recovered_insn =
recover_probed_instruction(buf, (unsigned long)src);
@ -361,16 +362,18 @@ int __copy_instruction(u8 *dest, u8 *src)
return 0;
kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
insn_get_length(&insn);
length = insn.length;
/* Another subsystem puts a breakpoint, failed to recover */
if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
return 0;
memcpy(dest, insn.kaddr, insn.length);
memcpy(dest, insn.kaddr, length);
#ifdef CONFIG_X86_64
if (insn_rip_relative(&insn)) {
s64 newdisp;
u8 *disp;
kernel_insn_init(&insn, dest, insn.length);
kernel_insn_init(&insn, dest, length);
insn_get_displacement(&insn);
/*
* The copied instruction uses the %rip-relative addressing
@ -394,7 +397,7 @@ int __copy_instruction(u8 *dest, u8 *src)
*(s32 *) disp = (s32) newdisp;
}
#endif
return insn.length;
return length;
}
static int arch_copy_kprobe(struct kprobe *p)


@ -53,6 +53,7 @@ struct perf_guest_info_callbacks {
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <asm/local.h>
struct perf_callchain_entry {
@ -118,10 +119,16 @@ struct hw_perf_event {
struct hrtimer hrtimer;
};
struct { /* tracepoint */
struct task_struct *tp_target;
/* for tp_event->class */
struct list_head tp_list;
};
struct { /* intel_cqm */
int cqm_state;
int cqm_rmid;
struct list_head cqm_events_entry;
struct list_head cqm_groups_entry;
struct list_head cqm_group_entry;
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct { /* breakpoint */
/*
@ -129,12 +136,12 @@ struct hw_perf_event {
* problem hw_breakpoint has with context
* creation and event initialization.
*/
struct task_struct *bp_target;
struct arch_hw_breakpoint info;
struct list_head bp_list;
};
#endif
};
struct task_struct *target;
int state;
local64_t prev_count;
u64 sample_period;
@ -262,9 +269,20 @@ struct pmu {
int (*event_idx) (struct perf_event *event); /*optional */
/*
* flush branch stack on context-switches (needed in cpu-wide mode)
* context-switches callback
*/
void (*flush_branch_stack) (void);
void (*sched_task) (struct perf_event_context *ctx,
bool sched_in);
/*
* PMU specific data size
*/
size_t task_ctx_size;
/*
* Return the count value for a counter.
*/
u64 (*count) (struct perf_event *event); /*optional*/
};
/**
@ -300,6 +318,7 @@ struct swevent_hlist {
#define PERF_ATTACH_CONTEXT 0x01
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
#define PERF_ATTACH_TASK_DATA 0x08
struct perf_cgroup;
struct ring_buffer;
@ -504,7 +523,7 @@ struct perf_event_context {
u64 generation;
int pin_count;
int nr_cgroups; /* cgroup evts */
int nr_branch_stack; /* branch_stack evt */
void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
struct delayed_work orphans_remove;
@ -540,6 +559,35 @@ struct perf_output_handle {
int page;
};
#ifdef CONFIG_CGROUP_PERF
/*
* perf_cgroup_info keeps track of time_enabled for a cgroup.
* This is a per-cpu dynamically allocated data structure.
*/
struct perf_cgroup_info {
u64 time;
u64 timestamp;
};
struct perf_cgroup {
struct cgroup_subsys_state css;
struct perf_cgroup_info __percpu *info;
};
/*
* Must ensure cgroup is pinned (css_get) before calling
* this function. In other words, we cannot call this function
* if there is no cgroup event for the current CPU context.
*/
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
return container_of(task_css(task, perf_event_cgrp_id),
struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */
#ifdef CONFIG_PERF_EVENTS
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
@ -558,6 +606,8 @@ extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
@ -731,6 +781,11 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
__perf_event_task_sched_out(prev, next);
}
static inline u64 __perf_event_count(struct perf_event *event)
{
return local64_read(&event->count) + atomic64_read(&event->child_count);
}
extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@ -800,6 +855,11 @@ static inline bool has_branch_stack(struct perf_event *event)
return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}
static inline bool needs_branch_stack(struct perf_event *event)
{
return event->attr.branch_sample_type != 0;
}
extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);

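The new optional pmu->count() method (paired with the __perf_event_count() fallback above) lets a PMU report values that never pass through event->count at all; the Cache QoS Monitoring driver added by this merge reads hardware occupancy counters this way. A sketch of the shape of such an override (hypothetical driver; read_hw_counter() is an assumed helper, not a kernel API):

/* Sketch: a PMU that sources its count directly from hardware. */
static u64 example_pmu_count(struct perf_event *event)
{
        /* e.g. CQM-style: read an occupancy MSR selected by the event */
        return read_hw_counter(event->hw.idx);
}

static struct pmu example_pmu = {
        /* ... */
        .count = example_pmu_count,     /* optional; when NULL the core
                                         * uses __perf_event_count() */
};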

@ -152,21 +152,42 @@ enum perf_event_sample_format {
* The branch types can be combined, however BRANCH_ANY covers all types
* of branches and therefore it supersedes all the other types.
*/
enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */
PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */
PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */
PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */
PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */
PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */
PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */
PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */
PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */
PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */
PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */
PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_USER = 1U << 0, /* user branches */
PERF_SAMPLE_BRANCH_KERNEL = 1U << 1, /* kernel branches */
PERF_SAMPLE_BRANCH_HV = 1U << 2, /* hypervisor branches */
PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
PERF_SAMPLE_BRANCH_ANY = 1U << 3, /* any branch types */
PERF_SAMPLE_BRANCH_ANY_CALL = 1U << 4, /* any call branch */
PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << 5, /* any return branch */
PERF_SAMPLE_BRANCH_IND_CALL = 1U << 6, /* indirect calls */
PERF_SAMPLE_BRANCH_ABORT_TX = 1U << 7, /* transaction aborts */
PERF_SAMPLE_BRANCH_IN_TX = 1U << 8, /* in transaction */
PERF_SAMPLE_BRANCH_NO_TX = 1U << 9, /* not in transaction */
PERF_SAMPLE_BRANCH_COND = 1U << 10, /* conditional branches */
PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
PERF_SAMPLE_BRANCH_MAX = 1U << 11, /* non-ABI */
PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
#define PERF_SAMPLE_BRANCH_PLM_ALL \

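With PERF_SAMPLE_BRANCH_CALL_STACK now part of the ABI, user space can ask for LBR-assisted call stacks; note that the LBR filter setup earlier in this commit only accepts it combined with plain user/kernel privilege filters. A minimal perf_event_open() sketch (illustrative; error handling elided):

#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int open_callstack_event(pid_t pid)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 100000;
        attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
        /* call-stack mode is only valid with user/kernel priv filters */
        attr.branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
                                  PERF_SAMPLE_BRANCH_USER;
        attr.exclude_kernel = 1;

        return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}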

@ -34,11 +34,11 @@
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
@ -153,7 +153,7 @@ enum event_type_t {
*/
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
@ -351,32 +351,6 @@ static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
#ifdef CONFIG_CGROUP_PERF
/*
* perf_cgroup_info keeps track of time_enabled for a cgroup.
* This is a per-cpu dynamically allocated data structure.
*/
struct perf_cgroup_info {
u64 time;
u64 timestamp;
};
struct perf_cgroup {
struct cgroup_subsys_state css;
struct perf_cgroup_info __percpu *info;
};
/*
* Must ensure cgroup is pinned (css_get) before calling
* this function. In other words, we cannot call this function
* if there is no cgroup event for the current CPU context.
*/
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
return container_of(task_css(task, perf_event_cgrp_id),
struct perf_cgroup, css);
}
static inline bool
perf_cgroup_match(struct perf_event *event)
{
@ -905,6 +879,15 @@ static void get_ctx(struct perf_event_context *ctx)
WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}
static void free_ctx(struct rcu_head *head)
{
struct perf_event_context *ctx;
ctx = container_of(head, struct perf_event_context, rcu_head);
kfree(ctx->task_ctx_data);
kfree(ctx);
}
static void put_ctx(struct perf_event_context *ctx)
{
if (atomic_dec_and_test(&ctx->refcount)) {
@ -912,7 +895,7 @@ static void put_ctx(struct perf_event_context *ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task)
put_task_struct(ctx->task);
kfree_rcu(ctx, rcu_head);
call_rcu(&ctx->rcu_head, free_ctx);
}
}
@ -1239,9 +1222,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
if (is_cgroup_event(event))
ctx->nr_cgroups++;
if (has_branch_stack(event))
ctx->nr_branch_stack++;
list_add_rcu(&event->event_entry, &ctx->event_list);
ctx->nr_events++;
if (event->attr.inherit_stat)
@ -1408,9 +1388,6 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
cpuctx->cgrp = NULL;
}
if (has_branch_stack(event))
ctx->nr_branch_stack--;
ctx->nr_events--;
if (event->attr.inherit_stat)
ctx->nr_stat--;
@ -1881,6 +1858,10 @@ event_sched_in(struct perf_event *event,
perf_pmu_disable(event->pmu);
event->tstamp_running += tstamp - event->tstamp_stopped;
perf_set_shadow_time(event, ctx, tstamp);
if (event->pmu->add(event, PERF_EF_START)) {
event->state = PERF_EVENT_STATE_INACTIVE;
event->oncpu = -1;
@ -1888,10 +1869,6 @@ event_sched_in(struct perf_event *event,
goto out;
}
event->tstamp_running += tstamp - event->tstamp_stopped;
perf_set_shadow_time(event, ctx, tstamp);
if (!is_software_event(event))
cpuctx->active_oncpu++;
if (!ctx->nr_active++)
@ -2559,6 +2536,9 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
next->perf_event_ctxp[ctxn] = ctx;
ctx->task = next;
next_ctx->task = task;
swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
do_switch = 0;
perf_event_sync_stat(ctx, next_ctx);
@ -2577,6 +2557,56 @@ unlock:
}
}
void perf_sched_cb_dec(struct pmu *pmu)
{
this_cpu_dec(perf_sched_cb_usages);
}
void perf_sched_cb_inc(struct pmu *pmu)
{
this_cpu_inc(perf_sched_cb_usages);
}
/*
* This function provides the context switch callback to the lower code
* layer. It is invoked ONLY when the context switch callback is enabled.
*/
static void perf_pmu_sched_task(struct task_struct *prev,
struct task_struct *next,
bool sched_in)
{
struct perf_cpu_context *cpuctx;
struct pmu *pmu;
unsigned long flags;
if (prev == next)
return;
local_irq_save(flags);
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
if (pmu->sched_task) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(pmu);
pmu->sched_task(cpuctx->task_ctx, sched_in);
perf_pmu_enable(pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
}
rcu_read_unlock();
local_irq_restore(flags);
}
#define for_each_task_context_nr(ctxn) \
for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
@ -2596,6 +2626,9 @@ void __perf_event_task_sched_out(struct task_struct *task,
{
int ctxn;
if (__this_cpu_read(perf_sched_cb_usages))
perf_pmu_sched_task(task, next, false);
for_each_task_context_nr(ctxn)
perf_event_context_sched_out(task, ctxn, next);
@ -2754,64 +2787,6 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
perf_ctx_unlock(cpuctx, ctx);
}
/*
* When sampling the branch stack in system-wide mode, it may be necessary
* to flush the stack on context switch. This happens when the branch
* stack does not tag its entries with the pid of the current task.
* Otherwise it becomes impossible to associate a branch entry with a
* task. This ambiguity is more likely to appear when the branch stack
* supports priv level filtering and the user sets it to monitor only
* at the user level (which could be a useful measurement in system-wide
* mode). In that case, the risk is high of having a branch stack with
* branch from multiple tasks. Flushing may mean dropping the existing
* entries or stashing them somewhere in the PMU specific code layer.
*
* This function provides the context switch callback to the lower code
* layer. It is invoked ONLY when there is at least one system-wide context
* with at least one active event using taken branch sampling.
*/
static void perf_branch_stack_sched_in(struct task_struct *prev,
struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
struct pmu *pmu;
unsigned long flags;
/* no need to flush branch stack if not changing task */
if (prev == task)
return;
local_irq_save(flags);
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
/*
* check if the context has at least one
* event using PERF_SAMPLE_BRANCH_STACK
*/
if (cpuctx->ctx.nr_branch_stack > 0
&& pmu->flush_branch_stack) {
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(pmu);
pmu->flush_branch_stack();
perf_pmu_enable(pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
}
rcu_read_unlock();
local_irq_restore(flags);
}
/*
* Called from scheduler to add the events of the current task
* with interrupts disabled.
@ -2844,9 +2819,8 @@ void __perf_event_task_sched_in(struct task_struct *prev,
if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
perf_cgroup_sched_in(prev, task);
/* check for system-wide branch_stack events */
if (atomic_read(this_cpu_ptr(&perf_branch_stack_events)))
perf_branch_stack_sched_in(prev, task);
if (__this_cpu_read(perf_sched_cb_usages))
perf_pmu_sched_task(prev, task, true);
}
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@ -3220,7 +3194,10 @@ static void __perf_event_read(void *info)
static inline u64 perf_event_count(struct perf_event *event)
{
return local64_read(&event->count) + atomic64_read(&event->child_count);
if (event->pmu->count)
return event->pmu->count(event);
return __perf_event_count(event);
}
static u64 perf_event_read(struct perf_event *event)
@ -3321,12 +3298,15 @@ errout:
* Returns a matching context with refcount and pincount.
*/
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
find_get_context(struct pmu *pmu, struct task_struct *task,
struct perf_event *event)
{
struct perf_event_context *ctx, *clone_ctx = NULL;
struct perf_cpu_context *cpuctx;
void *task_ctx_data = NULL;
unsigned long flags;
int ctxn, err;
int cpu = event->cpu;
if (!task) {
/* Must be root to operate on a CPU event: */
@ -3354,11 +3334,24 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
if (ctxn < 0)
goto errout;
if (event->attach_state & PERF_ATTACH_TASK_DATA) {
task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
if (!task_ctx_data) {
err = -ENOMEM;
goto errout;
}
}
retry:
ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
clone_ctx = unclone_ctx(ctx);
++ctx->pin_count;
if (task_ctx_data && !ctx->task_ctx_data) {
ctx->task_ctx_data = task_ctx_data;
task_ctx_data = NULL;
}
raw_spin_unlock_irqrestore(&ctx->lock, flags);
if (clone_ctx)
@ -3369,6 +3362,11 @@ retry:
if (!ctx)
goto errout;
if (task_ctx_data) {
ctx->task_ctx_data = task_ctx_data;
task_ctx_data = NULL;
}
err = 0;
mutex_lock(&task->perf_event_mutex);
/*
@ -3395,9 +3393,11 @@ retry:
}
}
kfree(task_ctx_data);
return ctx;
errout:
kfree(task_ctx_data);
return ERR_PTR(err);
}
@ -3423,10 +3423,6 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
if (event->parent)
return;
if (has_branch_stack(event)) {
if (!(event->attach_state & PERF_ATTACH_TASK))
atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
}
if (is_cgroup_event(event))
atomic_dec(&per_cpu(perf_cgroup_events, cpu));
}
@ -4574,6 +4570,13 @@ static void perf_pending_event(struct irq_work *entry)
{
struct perf_event *event = container_of(entry,
struct perf_event, pending);
int rctx;
rctx = perf_swevent_get_recursion_context();
/*
* If we 'fail' here, that's OK, it means recursion is already disabled
* and we won't recurse 'further'.
*/
if (event->pending_disable) {
event->pending_disable = 0;
@ -4584,6 +4587,9 @@ static void perf_pending_event(struct irq_work *entry)
event->pending_wakeup = 0;
perf_event_wakeup(event);
}
if (rctx >= 0)
perf_swevent_put_recursion_context(rctx);
}
/*
@ -6123,6 +6129,7 @@ static int perf_swevent_add(struct perf_event *event, int flags)
}
hlist_add_head_rcu(&event->hlist_entry, head);
perf_event_update_userpage(event);
return 0;
}
@ -6592,6 +6599,7 @@ static int cpu_clock_event_add(struct perf_event *event, int flags)
{
if (flags & PERF_EF_START)
cpu_clock_event_start(event, flags);
perf_event_update_userpage(event);
return 0;
}
@ -6666,6 +6674,7 @@ static int task_clock_event_add(struct perf_event *event, int flags)
{
if (flags & PERF_EF_START)
task_clock_event_start(event, flags);
perf_event_update_userpage(event);
return 0;
}
@ -7027,12 +7036,23 @@ EXPORT_SYMBOL_GPL(perf_pmu_unregister);
static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
{
struct perf_event_context *ctx = NULL;
int ret;
if (!try_module_get(pmu->module))
return -ENODEV;
if (event->group_leader != event) {
ctx = perf_event_ctx_lock(event->group_leader);
BUG_ON(!ctx);
}
event->pmu = pmu;
ret = pmu->event_init(event);
if (ctx)
perf_event_ctx_unlock(event->group_leader, ctx);
if (ret)
module_put(pmu->module);
@ -7079,10 +7099,6 @@ static void account_event_cpu(struct perf_event *event, int cpu)
if (event->parent)
return;
if (has_branch_stack(event)) {
if (!(event->attach_state & PERF_ATTACH_TASK))
atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
}
if (is_cgroup_event(event))
atomic_inc(&per_cpu(perf_cgroup_events, cpu));
}
@ -7121,7 +7137,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
struct perf_event *group_leader,
struct perf_event *parent_event,
perf_overflow_handler_t overflow_handler,
void *context)
void *context, int cgroup_fd)
{
struct pmu *pmu;
struct perf_event *event;
@ -7176,16 +7192,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
if (task) {
event->attach_state = PERF_ATTACH_TASK;
if (attr->type == PERF_TYPE_TRACEPOINT)
event->hw.tp_target = task;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
* hw_breakpoint is a bit difficult here..
* XXX pmu::event_init needs to know what task to account to
* and we cannot use the ctx information because we need the
* pmu before we get a ctx.
*/
else if (attr->type == PERF_TYPE_BREAKPOINT)
event->hw.bp_target = task;
#endif
event->hw.target = task;
}
if (!overflow_handler && parent_event) {
@ -7214,6 +7226,15 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
goto err_ns;
if (!has_branch_stack(event))
event->attr.branch_sample_type = 0;
if (cgroup_fd != -1) {
err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
if (err)
goto err_ns;
}
pmu = perf_init_event(event);
if (!pmu)
goto err_ns;
@ -7237,6 +7258,8 @@ err_pmu:
event->destroy(event);
module_put(pmu->module);
err_ns:
if (is_cgroup_event(event))
perf_detach_cgroup(event);
if (event->ns)
put_pid_ns(event->ns);
kfree(event);
@ -7455,6 +7478,7 @@ SYSCALL_DEFINE5(perf_event_open,
int move_group = 0;
int err;
int f_flags = O_RDWR;
int cgroup_fd = -1;
/* for future expandability... */
if (flags & ~PERF_FLAG_ALL)
@ -7520,21 +7544,16 @@ SYSCALL_DEFINE5(perf_event_open,
get_online_cpus();
if (flags & PERF_FLAG_PID_CGROUP)
cgroup_fd = pid;
event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
NULL, NULL);
NULL, NULL, cgroup_fd);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err_cpus;
}
if (flags & PERF_FLAG_PID_CGROUP) {
err = perf_cgroup_connect(pid, event, &attr, group_leader);
if (err) {
__free_event(event);
goto err_cpus;
}
}
if (is_sampling_event(event)) {
if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
err = -ENOTSUPP;
@ -7576,7 +7595,7 @@ SYSCALL_DEFINE5(perf_event_open,
/*
* Get the target context (task or percpu):
*/
ctx = find_get_context(pmu, task, event->cpu);
ctx = find_get_context(pmu, task, event);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_alloc;
@ -7771,7 +7790,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
*/
event = perf_event_alloc(attr, cpu, task, NULL, NULL,
overflow_handler, context);
overflow_handler, context, -1);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err;
@ -7782,7 +7801,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
account_event(event);
ctx = find_get_context(event->pmu, task, cpu);
ctx = find_get_context(event->pmu, task, event);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_free;
@ -8132,7 +8151,7 @@ inherit_event(struct perf_event *parent_event,
parent_event->cpu,
child,
group_leader, parent_event,
NULL, NULL);
NULL, NULL, -1);
if (IS_ERR(child_event))
return child_event;


@ -116,12 +116,12 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
*/
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
struct task_struct *tsk = bp->hw.bp_target;
struct task_struct *tsk = bp->hw.target;
struct perf_event *iter;
int count = 0;
list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
if (iter->hw.bp_target == tsk &&
if (iter->hw.target == tsk &&
find_slot_idx(iter) == type &&
(iter->cpu < 0 || cpu == iter->cpu))
count += hw_breakpoint_weight(iter);
@ -153,7 +153,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
int nr;
nr = info->cpu_pinned;
if (!bp->hw.bp_target)
if (!bp->hw.target)
nr += max_task_bp_pinned(cpu, type);
else
nr += task_bp_pinned(cpu, bp, type);
@ -210,7 +210,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
weight = -weight;
/* Pinned counter cpu profiling */
if (!bp->hw.bp_target) {
if (!bp->hw.target) {
get_bp_info(bp->cpu, type)->cpu_pinned += weight;
return;
}


@ -1005,7 +1005,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
return true;
list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
if (event->hw.tp_target->mm == mm)
if (event->hw.target->mm == mm)
return true;
}
@ -1015,7 +1015,7 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}
static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
@ -1023,10 +1023,10 @@ static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
bool done;
write_lock(&tu->filter.rwlock);
if (event->hw.tp_target) {
if (event->hw.target) {
list_del(&event->hw.tp_list);
done = tu->filter.nr_systemwide ||
(event->hw.tp_target->flags & PF_EXITING) ||
(event->hw.target->flags & PF_EXITING) ||
uprobe_filter_event(tu, event);
} else {
tu->filter.nr_systemwide--;
@ -1046,7 +1046,7 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
int err;
write_lock(&tu->filter.rwlock);
if (event->hw.tp_target) {
if (event->hw.target) {
/*
* event->parent != NULL means copy_process(), we can avoid
* uprobe_apply(). current->mm must be probed and we can rely

tools/build/Build.include Normal file

@ -0,0 +1,81 @@
###
# build: Generic definitions
#
# Lots of this code has been borrowed or heavily inspired by parts
# of kbuild code, which is not credited, but mostly developed by:
#
# Copyright (C) Sam Ravnborg <sam@mars.ravnborg.org>, 2015
# Copyright (C) Linus Torvalds <torvalds@linux-foundation.org>, 2015
#
###
# Convenient variables
comma := ,
squote := '
###
# Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
dot-target = $(dir $@).$(notdir $@)
###
# filename of target with directory and extension stripped
basetarget = $(basename $(notdir $@))
###
# The temporary file to save gcc -MD generated dependencies must not
# contain a comma
depfile = $(subst $(comma),_,$(dot-target).d)
###
# Check if both arguments are the same. Result is empty string if equal.
arg-check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \
$(filter-out $(cmd_$@), $(cmd_$(1))) )
###
# Escape single quote for use in echo statements
escsq = $(subst $(squote),'\$(squote)',$1)
# Echo command
# Short version is used, if $(quiet) equals `quiet_', otherwise full one.
echo-cmd = $(if $($(quiet)cmd_$(1)),\
echo ' $(call escsq,$($(quiet)cmd_$(1)))';)
###
# Replace >$< with >$$< to preserve $ when reloading the .cmd file
# (needed for make)
# Replace >#< with >\#< to avoid starting a comment in the .cmd file
# (needed for make)
# Replace >'< with >'\''< to be able to enclose the whole string in '...'
# (needed for the shell)
make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1)))))
###
# Find any prerequisites that are newer than the target or that do not exist.
# PHONY targets are skipped in both cases.
any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^)
###
# if_changed_dep - execute command if any prerequisite is newer than
# target, or command line has changed and update
# dependencies in the cmd file
if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \
@set -e; \
$(echo-cmd) $(cmd_$(1)); \
cat $(depfile) > $(dot-target).cmd; \
printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
# if_changed - execute command if any prerequisite is newer than
# target, or command line has changed
if_changed = $(if $(strip $(any-prereq) $(arg-check)), \
@set -e; \
$(echo-cmd) $(cmd_$(1)); \
printf '%s\n' 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd)
###
# C flags to be used in rule definitions, includes:
# - depfile generation
# - global $(CFLAGS)
# - per target C flags
# - per object C flags
# - BUILD_STR macro to allow '-D"$(variable)"' constructs
c_flags = -Wp,-MD,$(depfile),-MT,$@ $(CFLAGS) -D"BUILD_STR(s)=\#s" $(CFLAGS_$(basetarget).o) $(CFLAGS_$(obj))


@ -0,0 +1,139 @@
Build Framework
===============
The perf build framework was adopted from the kernel build system, hence the
idea and the way objects are built are the same.
Basically the user provides a set of 'Build' files that list objects and
directories to nest for a specific target to be built.
Unlike the kernel we don't have a single 'obj-y' build-object list where we
set up source objects; we support more than one. This allows a single 'Build'
file to carry a sources list for multiple build objects.
a) Build framework makefiles
----------------------------
The build framework consists of 2 Makefiles:
Build.include
Makefile.build
While the 'Build.include' file contains just some generic definitions, the
'Makefile.build' file is the makefile used from the outside. Its
interface/usage is as follows:
$ make -f tools/build/Makefile.build srctree=$(KSRC) dir=$(DIR) obj=$(OBJECT)
where:
KSRC - is the path to kernel sources
DIR - is the path to the project to be built
OBJECT - is the name of the build object
When it finishes successfully, the $(DIR) directory contains the final object file
called $(OBJECT)-in.o:
$ ls $(DIR)/$(OBJECT)-in.o
which includes all compiled sources described in 'Build' makefiles.
b) Build makefiles
------------------
The user supplies 'Build' makefiles that contain an objects list and connect
the build to nested directories.
Assume we have the following project structure:
ex/a.c
/b.c
/c.c
/d.c
/arch/e.c
/arch/f.c
Out of which you build the 'ex' binary and the 'libex.a' library:
'ex' - consists of 'a.o', 'b.o' and libex.a
'libex.a' - consists of 'c.o', 'd.o', 'e.o' and 'f.o'
The build framework does not create the 'ex' and 'libex.a' binaries for you, it
only prepares proper objects to be compiled and grouped together.
To follow the above example, the user provides the following 'Build' files:
ex/Build:
ex-y += a.o
ex-y += b.o
libex-y += c.o
libex-y += d.o
libex-y += arch/
ex/arch/Build:
libex-y += e.o
libex-y += f.o
and runs:
$ make -f tools/build/Makefile.build dir=. obj=ex
$ make -f tools/build/Makefile.build dir=. obj=libex
which creates the following objects:
ex/ex-in.o
ex/libex-in.o
that contain the requested objects named in the 'Build' files.
Creating the final binaries is then only a matter of two commands:
$ ar rcs libex.a libex-in.o
$ gcc -o ex ex-in.o libex.a
You can check the 'ex' example in 'tools/build/tests/ex' for more details.
c) Rules
--------
The build framework provides standard compilation rules to handle .S and .c
compilation.
It's possible to include a special rule if needed (as we do for flex and bison
code generation).
d) CFLAGS
---------
It's possible to alter the standard object C flags in the following way:
CFLAGS_perf.o += '...' - alters CFLAGS for perf.o object
CFLAGS_gtk += '...' - alters CFLAGS for gtk build object
These CFLAGS changes are scoped to the Build makefile they are defined in.
e) Dependencies
---------------
For each built object file 'a.o' a '.a.o.cmd' file is created and holds:
- the command line used to build that object
(for each object)
- dependency rules generated by 'gcc -Wp,-MD,...'
(for each compiled object)
All existing '.cmd' files are included in the build process to properly follow
the dependencies and trigger a rebuild when necessary.
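As a sketch (hypothetical contents; the exact dependency list and flags vary
with the compiler and configuration), a generated '.a.o.cmd' file might hold:

    a.o: a.c /usr/include/stdio.h
    cmd_a.o := gcc -Wp,-MD,./.a.o.d,-MT,a.o -c -o a.o a.c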
f) Single rules
---------------
It's possible to build a single object file on demand, like:
$ make util/map.o # objects
$ make util/map.i # preprocessor
$ make util/map.s # assembly

tools/build/Makefile.build Normal file

@ -0,0 +1,130 @@
###
# Main build makefile.
#
# Lots of this code has been borrowed or heavily inspired by parts
# of kbuild code, which is not credited, but mostly developed by:
#
# Copyright (C) Sam Ravnborg <sam@mars.ravnborg.org>, 2015
# Copyright (C) Linus Torvalds <torvalds@linux-foundation.org>, 2015
#
PHONY := __build
__build:
ifeq ($(V),1)
quiet =
Q =
else
quiet=quiet_
Q=@
endif
build-dir := $(srctree)/tools/build
# Generic definitions
include $(build-dir)/Build.include
# do not force detected configuration
-include .config-detected
# Init all relevant variables used in build files so
# 1) they have correct type
# 2) they do not inherit any value from the environment
subdir-y :=
obj-y :=
subdir-y :=
subdir-obj-y :=
# Build definitions
build-file := $(dir)/Build
include $(build-file)
quiet_cmd_flex = FLEX $@
quiet_cmd_bison = BISON $@
# Create directory unless it exists
quiet_cmd_mkdir = MKDIR $(dir $@)
cmd_mkdir = mkdir -p $(dir $@)
rule_mkdir = $(if $(wildcard $(dir $@)),,@$(call echo-cmd,mkdir) $(cmd_mkdir))
# Compile command
quiet_cmd_cc_o_c = CC $@
cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
quiet_cmd_cc_i_c = CPP $@
cmd_cc_i_c = $(CC) $(c_flags) -E -o $@ $<
quiet_cmd_cc_s_c = AS $@
cmd_cc_s_c = $(CC) $(c_flags) -S -o $@ $<
# Link aggregate command
# If there's nothing to link, create empty $@ object.
quiet_cmd_ld_multi = LD $@
cmd_ld_multi = $(if $(strip $(obj-y)),\
$(LD) -r -o $@ $(obj-y),rm -f $@; $(AR) rcs $@)
# Build rules
$(OUTPUT)%.o: %.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
$(OUTPUT)%.o: %.S FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
$(OUTPUT)%.i: %.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_i_c)
$(OUTPUT)%.i: %.S FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_i_c)
$(OUTPUT)%.s: %.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_s_c)
# Gather build data:
# obj-y - list of build objects
# subdir-y - list of directories to nest
# subdir-obj-y - list of directories objects 'dir/$(obj)-in.o'
obj-y := $($(obj)-y)
subdir-y := $(patsubst %/,%,$(filter %/, $(obj-y)))
obj-y := $(patsubst %/, %/$(obj)-in.o, $(obj-y))
subdir-obj-y := $(filter %/$(obj)-in.o, $(obj-y))
# '$(OUTPUT)/dir' prefix to all objects
prefix := $(subst ./,,$(OUTPUT)$(dir)/)
obj-y := $(addprefix $(prefix),$(obj-y))
subdir-obj-y := $(addprefix $(prefix),$(subdir-obj-y))
# Final '$(obj)-in.o' object
in-target := $(prefix)$(obj)-in.o
PHONY += $(subdir-y)
$(subdir-y):
$(Q)$(MAKE) -f $(build-dir)/Makefile.build dir=$(dir)/$@ obj=$(obj)
$(sort $(subdir-obj-y)): $(subdir-y) ;
$(in-target): $(obj-y) FORCE
$(call rule_mkdir)
$(call if_changed,ld_multi)
__build: $(in-target)
@:
PHONY += FORCE
FORCE:
# Include all cmd files to get all the dependency rules
# for all objects included
targets := $(wildcard $(sort $(obj-y) $(in-target) $(MAKECMDGOALS)))
cmd_files := $(wildcard $(foreach f,$(targets),$(dir $(f)).$(notdir $(f)).cmd))
ifneq ($(cmd_files),)
include $(cmd_files)
endif
.PHONY: $(PHONY)


@ -0,0 +1,171 @@
feature_dir := $(srctree)/tools/build/feature
ifneq ($(OUTPUT),)
OUTPUT_FEATURES = $(OUTPUT)feature/
$(shell mkdir -p $(OUTPUT_FEATURES))
endif
feature_check = $(eval $(feature_check_code))
define feature_check_code
feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
endef
feature_set = $(eval $(feature_set_code))
define feature_set_code
feature-$(1) := 1
endef
#
# Build the feature check binaries in parallel, ignore errors, ignore return value and suppress output:
#
#
# Note that this is not a complete list of all feature tests, just
# those that are typically built on a fully configured system.
#
# [ Feature tests not mentioned here have to be built explicitly in
# the rule that uses them - an example for that is the 'bionic'
# feature check. ]
#
FEATURE_TESTS = \
backtrace \
dwarf \
fortify-source \
sync-compare-and-swap \
glibc \
gtk2 \
gtk2-infobar \
libaudit \
libbfd \
libelf \
libelf-getphdrnum \
libelf-mmap \
libnuma \
libperl \
libpython \
libpython-version \
libslang \
libunwind \
pthread-attr-setaffinity-np \
stackprotector-all \
timerfd \
libdw-dwarf-unwind \
zlib \
lzma
FEATURE_DISPLAY = \
dwarf \
glibc \
gtk2 \
libaudit \
libbfd \
libelf \
libnuma \
libperl \
libpython \
libslang \
libunwind \
libdw-dwarf-unwind \
zlib \
lzma
# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
# If in the future we need per-feature checks/flags for features not
# mentioned in this list we need to refactor this ;-).
set_test_all_flags = $(eval $(set_test_all_flags_code))
define set_test_all_flags_code
FEATURE_CHECK_CFLAGS-all += $(FEATURE_CHECK_CFLAGS-$(1))
FEATURE_CHECK_LDFLAGS-all += $(FEATURE_CHECK_LDFLAGS-$(1))
endef
$(foreach feat,$(FEATURE_TESTS),$(call set_test_all_flags,$(feat)))
#
# Special fast-path for the 'all features are available' case:
#
$(call feature_check,all,$(MSG))
#
# Just in case the build freshly failed, make sure we print the
# feature matrix:
#
ifeq ($(feature-all), 1)
#
# test-all.c passed - just set all the core feature flags to 1:
#
$(foreach feat,$(FEATURE_TESTS),$(call feature_set,$(feat)))
else
$(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS)" LDFLAGS=$(LDFLAGS) -i -j -C $(feature_dir) $(addsuffix .bin,$(FEATURE_TESTS)) >/dev/null 2>&1)
$(foreach feat,$(FEATURE_TESTS),$(call feature_check,$(feat)))
endif
#
# Print the result of the feature test:
#
feature_print_status = $(eval $(feature_print_status_code)) $(info $(MSG))
define feature_print_status_code
ifeq ($(feature-$(1)), 1)
MSG = $(shell printf '...%30s: [ \033[32mon\033[m ]' $(1))
else
MSG = $(shell printf '...%30s: [ \033[31mOFF\033[m ]' $(1))
endif
endef
feature_print_text = $(eval $(feature_print_text_code)) $(info $(MSG))
define feature_print_text_code
MSG = $(shell printf '...%30s: %s' $(1) $(2))
endef
FEATURE_DUMP := $(foreach feat,$(FEATURE_DISPLAY),feature-$(feat)($(feature-$(feat))))
FEATURE_DUMP_FILE := $(shell touch $(OUTPUT)FEATURE-DUMP; cat $(OUTPUT)FEATURE-DUMP)
ifeq ($(dwarf-post-unwind),1)
FEATURE_DUMP += dwarf-post-unwind($(dwarf-post-unwind-text))
endif
# The $(feature_display) controls the default detection message
# output. It's set if:
# - detected features differ from the stored features from
# last build (in FEATURE-DUMP file)
# - one of the $(FEATURE_DISPLAY) is not detected
# - VF is enabled
ifneq ("$(FEATURE_DUMP)","$(FEATURE_DUMP_FILE)")
$(shell echo "$(FEATURE_DUMP)" > $(OUTPUT)FEATURE-DUMP)
feature_display := 1
endif
feature_display_check = $(eval $(feature_display_check_code))
define feature_display_check_code
ifneq ($(feature-$(1)), 1)
feature_display := 1
endif
endef
$(foreach feat,$(FEATURE_DISPLAY),$(call feature_display_check,$(feat)))
ifeq ($(VF),1)
feature_display := 1
feature_verbose := 1
endif
ifeq ($(feature_display),1)
$(info )
$(info Auto-detecting system features:)
$(foreach feat,$(FEATURE_DISPLAY),$(call feature_print_status,$(feat),))
ifeq ($(dwarf-post-unwind),1)
$(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text))
endif
ifneq ($(feature_verbose),1)
$(info )
endif
endif
ifeq ($(feature_verbose),1)
TMP := $(filter-out $(FEATURE_DISPLAY),$(FEATURE_TESTS))
$(foreach feat,$(TMP),$(call feature_print_status,$(feat),))
$(info )
endif


@ -1,2 +1,3 @@
*.d
*.bin
*.output


@ -29,33 +29,36 @@ FILES= \
test-stackprotector-all.bin \
test-timerfd.bin \
test-libdw-dwarf-unwind.bin \
test-libbabeltrace.bin \
test-compile-32.bin \
test-compile-x32.bin \
test-zlib.bin
test-zlib.bin \
test-lzma.bin
CC := $(CROSS_COMPILE)gcc -MD
PKG_CONFIG := $(CROSS_COMPILE)pkg-config
all: $(FILES)
BUILD = $(CC) $(CFLAGS) -o $(OUTPUT)$@ $(patsubst %.bin,%.c,$@) $(LDFLAGS)
__BUILD = $(CC) $(CFLAGS) -Wall -Werror -o $(OUTPUT)$@ $(patsubst %.bin,%.c,$@) $(LDFLAGS)
BUILD = $(__BUILD) > $(OUTPUT)$(@:.bin=.make.output) 2>&1
###############################
test-all.bin:
$(BUILD) -Werror -fstack-protector-all -O2 -Werror -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -laudit -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz
$(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -laudit -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma
test-hello.bin:
$(BUILD)
test-pthread-attr-setaffinity-np.bin:
$(BUILD) -D_GNU_SOURCE -Werror -lpthread
$(BUILD) -D_GNU_SOURCE -lpthread
test-stackprotector-all.bin:
$(BUILD) -Werror -fstack-protector-all
$(BUILD) -fstack-protector-all
test-fortify-source.bin:
$(BUILD) -O2 -Werror -D_FORTIFY_SOURCE=2
$(BUILD) -O2 -D_FORTIFY_SOURCE=2
test-bionic.bin:
$(BUILD)
@ -118,10 +121,10 @@ test-libbfd.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
test-liberty.bin:
$(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty
$(CC) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty
test-liberty-z.bin:
$(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz
$(CC) -Wall -Werror -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz
test-cplus-demangle.bin:
$(BUILD) -liberty
@ -133,10 +136,13 @@ test-timerfd.bin:
$(BUILD)
test-libdw-dwarf-unwind.bin:
$(BUILD)
$(BUILD) # -ldw provided by $(FEATURE_CHECK_LDFLAGS-libdw-dwarf-unwind)
test-libbabeltrace.bin:
$(BUILD) # -lbabeltrace provided by $(FEATURE_CHECK_LDFLAGS-libbabeltrace)
test-sync-compare-and-swap.bin:
$(BUILD) -Werror
$(BUILD)
test-compile-32.bin:
$(CC) -m32 -o $(OUTPUT)$@ test-compile.c
@ -147,9 +153,12 @@ test-compile-x32.bin:
test-zlib.bin:
$(BUILD) -lz
test-lzma.bin:
$(BUILD) -llzma
-include *.d
###############################
clean:
rm -f $(FILES) *.d
rm -f $(FILES) *.d $(FILES:.bin=.make.output)


@ -98,7 +98,23 @@
#undef main
#define main main_test_pthread_attr_setaffinity_np
# include "test-pthread_attr_setaffinity_np.c"
# include "test-pthread-attr-setaffinity-np.c"
#undef main
# if 0
/*
* Disable libbabeltrace check for test-all, because the requested
* library version is not released yet in most distributions. Will
* reenable later.
*/
#define main main_test_libbabeltrace
# include "test-libbabeltrace.c"
#undef main
#endif
#define main main_test_lzma
# include "test-lzma.c"
#undef main
int main(int argc, char *argv[])
@ -126,6 +142,7 @@ int main(int argc, char *argv[])
main_test_sync_compare_and_swap(argc, argv);
main_test_zlib();
main_test_pthread_attr_setaffinity_np();
main_test_lzma();
return 0;
}


@ -0,0 +1,9 @@
#include <babeltrace/ctf-writer/writer.h>
#include <babeltrace/ctf-ir/stream-class.h>
int main(void)
{
bt_ctf_stream_class_get_packet_context_type((void *) 0);
return 0;
}


@ -0,0 +1,10 @@
#include <lzma.h>
int main(void)
{
lzma_stream strm = LZMA_STREAM_INIT;
int ret;
ret = lzma_stream_decoder(&strm, UINT64_MAX, LZMA_CONCATENATED);
return ret ? -1 : 0;
}


@ -1,5 +1,6 @@
#include <stdint.h>
#include <pthread.h>
#include <sched.h>
int main(void)
{
@ -8,7 +9,8 @@ int main(void)
cpu_set_t cs;
pthread_attr_init(&thread_attr);
/* don't care about exact args, just the API itself in libpthread */
CPU_ZERO(&cs);
ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs);
return ret;


@ -0,0 +1,8 @@
ex-y += ex.o
ex-y += a.o
ex-y += b.o
ex-y += empty/
libex-y += c.o
libex-y += d.o
libex-y += arch/


@ -0,0 +1,23 @@
export srctree := ../../../..
export CC := gcc
export LD := ld
export AR := ar
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
ex: ex-in.o libex-in.o
gcc -o $@ $^
ex.%: FORCE
make -f $(srctree)/tools/build/Makefile.build dir=. $@
ex-in.o: FORCE
make $(build)=ex
libex-in.o: FORCE
make $(build)=libex
clean:
find . -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
rm -f ex ex.i ex.s
.PHONY: FORCE

tools/build/tests/ex/a.c Normal file

@ -0,0 +1,5 @@
int a(void)
{
return 0;
}


@ -0,0 +1,2 @@
libex-y += e.o
libex-y += f.o


@ -0,0 +1,5 @@
int e(void)
{
return 0;
}


@ -0,0 +1,5 @@
int f(void)
{
return 0;
}

tools/build/tests/ex/b.c Normal file

@ -0,0 +1,5 @@
int b(void)
{
return 0;
}

tools/build/tests/ex/c.c Normal file

@ -0,0 +1,5 @@
int c(void)
{
return 0;
}

tools/build/tests/ex/d.c Normal file

@ -0,0 +1,5 @@
int d(void)
{
return 0;
}


tools/build/tests/ex/ex.c Normal file

@ -0,0 +1,19 @@
int a(void);
int b(void);
int c(void);
int d(void);
int e(void);
int f(void);
int main(void)
{
a();
b();
c();
d();
e();
f();
return 0;
}

tools/build/tests/run.sh Executable file

@ -0,0 +1,42 @@
#!/bin/sh
function test_ex {
make -C ex V=1 clean > ex.out 2>&1
make -C ex V=1 >> ex.out 2>&1
if [ ! -x ./ex/ex ]; then
echo FAILED
exit -1
fi
make -C ex V=1 clean > /dev/null 2>&1
rm -f ex.out
}
function test_ex_suffix {
make -C ex V=1 clean > ex.out 2>&1
# use -rR to disable make's builtin rules
make -rR -C ex V=1 ex.o >> ex.out 2>&1
make -rR -C ex V=1 ex.i >> ex.out 2>&1
make -rR -C ex V=1 ex.s >> ex.out 2>&1
if [ -x ./ex/ex ]; then
echo FAILED
exit -1
fi
if [ ! -f ./ex/ex.o -o ! -f ./ex/ex.i -o ! -f ./ex/ex.s ]; then
echo FAILED
exit -1
fi
make -C ex V=1 clean > /dev/null 2>&1
rm -f ex.out
}
echo -n Testing..
test_ex
test_ex_suffix
echo OK

tools/lib/api/Build Normal file

@ -0,0 +1,2 @@
libapi-y += fd/
libapi-y += fs/


@ -1,49 +1,43 @@
include ../../scripts/Makefile.include
include ../../perf/config/utilities.mak # QUIET_CLEAN
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(shell pwd)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
#$(info Determined 'srctree' to be $(srctree))
endif
CC = $(CROSS_COMPILE)gcc
AR = $(CROSS_COMPILE)ar
# guard against environment variables
LIB_H=
LIB_OBJS=
MAKEFLAGS += --no-print-directory
LIB_H += fs/debugfs.h
LIB_H += fs/fs.h
# See comment below about piggybacking...
LIB_H += fd/array.h
LIBFILE = $(OUTPUT)libapi.a
LIB_OBJS += $(OUTPUT)fs/debugfs.o
LIB_OBJS += $(OUTPUT)fs/fs.o
# XXX piggybacking here, need to introduce libapikfd, or rename this
# to plain libapik.a and make it have it all api goodies
LIB_OBJS += $(OUTPUT)fd/array.o
LIBFILE = libapikfs.a
CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
EXTLIBS = -lelf -lpthread -lrt -lm
ALL_CFLAGS = $(CFLAGS) $(BASIC_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
ALL_LDFLAGS = $(LDFLAGS)
CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 -fPIC
CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
RM = rm -f
$(LIBFILE): $(LIB_OBJS)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $(OUTPUT)$@ $(LIB_OBJS)
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
API_IN := $(OUTPUT)libapi-in.o
$(LIB_OBJS): $(LIB_H)
export srctree OUTPUT CC LD CFLAGS V
libapi_dirs:
$(QUIET_MKDIR)mkdir -p $(OUTPUT)fd $(OUTPUT)fs
all: $(LIBFILE)
$(OUTPUT)%.o: %.c libapi_dirs
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
$(OUTPUT)%.s: %.c libapi_dirs
$(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $<
$(OUTPUT)%.o: %.S libapi_dirs
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
$(API_IN): FORCE
@$(MAKE) $(build)=libapi
$(LIBFILE): $(API_IN)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(API_IN)
clean:
$(call QUIET_CLEAN, libapi) $(RM) $(LIB_OBJS) $(LIBFILE)
$(call QUIET_CLEAN, libapi) $(RM) $(LIBFILE); \
find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o | xargs $(RM)
.PHONY: clean
FORCE:
.PHONY: clean FORCE

tools/lib/api/fd/Build Normal file

@ -0,0 +1 @@
libapi-y += array.o

tools/lib/api/fs/Build Normal file

@ -0,0 +1,4 @@
libapi-y += fs.o
libapi-y += debugfs.o
libapi-y += findfs.o
libapi-y += tracefs.o


@ -3,75 +3,50 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/vfs.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <linux/kernel.h>
#include "debugfs.h"
char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
#ifndef DEBUGFS_DEFAULT_PATH
#define DEBUGFS_DEFAULT_PATH "/sys/kernel/debug"
#endif
char debugfs_mountpoint[PATH_MAX + 1] = DEBUGFS_DEFAULT_PATH;
static const char * const debugfs_known_mountpoints[] = {
"/sys/kernel/debug",
DEBUGFS_DEFAULT_PATH,
"/debug",
0,
};
static bool debugfs_found;
bool debugfs_configured(void)
{
return debugfs_find_mountpoint() != NULL;
}
/* find the path to the mounted debugfs */
const char *debugfs_find_mountpoint(void)
{
const char * const *ptr;
char type[100];
FILE *fp;
const char *ret;
if (debugfs_found)
return (const char *)debugfs_mountpoint;
ptr = debugfs_known_mountpoints;
while (*ptr) {
if (debugfs_valid_mountpoint(*ptr) == 0) {
debugfs_found = true;
strcpy(debugfs_mountpoint, *ptr);
return debugfs_mountpoint;
}
ptr++;
}
ret = find_mountpoint("debugfs", (long) DEBUGFS_MAGIC,
debugfs_mountpoint, PATH_MAX + 1,
debugfs_known_mountpoints);
if (ret)
debugfs_found = true;
/* give up and parse /proc/mounts */
fp = fopen("/proc/mounts", "r");
if (fp == NULL)
return NULL;
while (fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n",
debugfs_mountpoint, type) == 2) {
if (strcmp(type, "debugfs") == 0)
break;
}
fclose(fp);
if (strcmp(type, "debugfs") != 0)
return NULL;
debugfs_found = true;
return debugfs_mountpoint;
}
/* verify that a mountpoint is actually a debugfs instance */
int debugfs_valid_mountpoint(const char *debugfs)
{
struct statfs st_fs;
if (statfs(debugfs, &st_fs) < 0)
return -ENOENT;
else if ((long)st_fs.f_type != (long)DEBUGFS_MAGIC)
return -ENOENT;
return 0;
return ret;
}
/* mount the debugfs somewhere if it's not mounted */
@ -87,7 +62,7 @@ char *debugfs_mount(const char *mountpoint)
mountpoint = getenv(PERF_DEBUGFS_ENVIRONMENT);
/* if no environment variable, use default */
if (mountpoint == NULL)
mountpoint = "/sys/kernel/debug";
mountpoint = DEBUGFS_DEFAULT_PATH;
}
if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0)


@ -1,16 +1,7 @@
#ifndef __API_DEBUGFS_H__
#define __API_DEBUGFS_H__
#define _STR(x) #x
#define STR(x) _STR(x)
/*
* On most systems <limits.h> would have given us this, but not on some systems
* (e.g. GNU/Hurd).
*/
#ifndef PATH_MAX
#define PATH_MAX 4096
#endif
#include "findfs.h"
#ifndef DEBUGFS_MAGIC
#define DEBUGFS_MAGIC 0x64626720
@ -20,8 +11,8 @@
#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"
#endif
bool debugfs_configured(void);
const char *debugfs_find_mountpoint(void);
int debugfs_valid_mountpoint(const char *debugfs);
char *debugfs_mount(const char *mountpoint);
extern char debugfs_mountpoint[];

tools/lib/api/fs/findfs.c Normal file

@ -0,0 +1,63 @@
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <sys/vfs.h>
#include "findfs.h"
/* verify that a mountpoint is actually the type we want */
int valid_mountpoint(const char *mount, long magic)
{
struct statfs st_fs;
if (statfs(mount, &st_fs) < 0)
return -ENOENT;
else if ((long)st_fs.f_type != magic)
return -ENOENT;
return 0;
}
/* find the path to a mounted file system */
const char *find_mountpoint(const char *fstype, long magic,
char *mountpoint, int len,
const char * const *known_mountpoints)
{
const char * const *ptr;
char format[128];
char type[100];
FILE *fp;
if (known_mountpoints) {
ptr = known_mountpoints;
while (*ptr) {
if (valid_mountpoint(*ptr, magic) == 0) {
strncpy(mountpoint, *ptr, len - 1);
mountpoint[len-1] = 0;
return mountpoint;
}
ptr++;
}
}
/* give up and parse /proc/mounts */
fp = fopen("/proc/mounts", "r");
if (fp == NULL)
return NULL;
snprintf(format, 128, "%%*s %%%ds %%99s %%*s %%*d %%*d\n", len);
while (fscanf(fp, format, mountpoint, type) == 2) {
if (strcmp(type, fstype) == 0)
break;
}
fclose(fp);
if (strcmp(type, fstype) != 0)
return NULL;
return mountpoint;
}

tools/lib/api/fs/findfs.h Normal file

@ -0,0 +1,23 @@
#ifndef __API_FINDFS_H__
#define __API_FINDFS_H__
#include <stdbool.h>
#define _STR(x) #x
#define STR(x) _STR(x)
/*
* On most systems <limits.h> would have given us this, but not on some systems
* (e.g. GNU/Hurd).
*/
#ifndef PATH_MAX
#define PATH_MAX 4096
#endif
const char *find_mountpoint(const char *fstype, long magic,
char *mountpoint, int len,
const char * const *known_mountpoints);
int valid_mountpoint(const char *mount, long magic);
#endif /* __API_FINDFS_H__ */


@ -0,0 +1,78 @@
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/vfs.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <linux/kernel.h>
#include "tracefs.h"
#ifndef TRACEFS_DEFAULT_PATH
#define TRACEFS_DEFAULT_PATH "/sys/kernel/tracing"
#endif
char tracefs_mountpoint[PATH_MAX + 1] = TRACEFS_DEFAULT_PATH;
static const char * const tracefs_known_mountpoints[] = {
TRACEFS_DEFAULT_PATH,
"/sys/kernel/debug/tracing",
"/tracing",
"/trace",
0,
};
static bool tracefs_found;
bool tracefs_configured(void)
{
return tracefs_find_mountpoint() != NULL;
}
/* find the path to the mounted tracefs */
const char *tracefs_find_mountpoint(void)
{
const char *ret;
if (tracefs_found)
return (const char *)tracefs_mountpoint;
ret = find_mountpoint("tracefs", (long) TRACEFS_MAGIC,
tracefs_mountpoint, PATH_MAX + 1,
tracefs_known_mountpoints);
if (ret)
tracefs_found = true;
return ret;
}
/* mount the tracefs somewhere if it's not mounted */
char *tracefs_mount(const char *mountpoint)
{
/* see if it's already mounted */
if (tracefs_find_mountpoint())
goto out;
/* if not mounted and no argument */
if (mountpoint == NULL) {
/* see if environment variable set */
mountpoint = getenv(PERF_TRACEFS_ENVIRONMENT);
/* if no environment variable, use default */
if (mountpoint == NULL)
mountpoint = TRACEFS_DEFAULT_PATH;
}
if (mount(NULL, mountpoint, "tracefs", 0, NULL) < 0)
return NULL;
/* save the mountpoint */
tracefs_found = true;
strncpy(tracefs_mountpoint, mountpoint, sizeof(tracefs_mountpoint));
out:
return tracefs_mountpoint;
}


@ -0,0 +1,21 @@
#ifndef __API_TRACEFS_H__
#define __API_TRACEFS_H__
#include "findfs.h"
#ifndef TRACEFS_MAGIC
#define TRACEFS_MAGIC 0x74726163
#endif
#ifndef PERF_TRACEFS_ENVIRONMENT
#define PERF_TRACEFS_ENVIRONMENT "PERF_TRACEFS_DIR"
#endif
bool tracefs_configured(void);
const char *tracefs_find_mountpoint(void);
int tracefs_valid_mountpoint(const char *tracefs);
char *tracefs_mount(const char *mountpoint);
extern char tracefs_mountpoint[];
#endif /* __API_TRACEFS_H__ */

tools/lib/lockdep/Build Normal file

@ -0,0 +1 @@
liblockdep-y += common.o lockdep.o preload.o rbtree.o


@ -35,6 +35,10 @@ bindir = $(prefix)/$(bindir_relative)
export DESTDIR DESTDIR_SQ INSTALL
MAKEFLAGS += --no-print-directory
include ../../scripts/Makefile.include
# copy a bit from Linux kbuild
ifeq ("$(origin V)", "command line")
@ -44,56 +48,21 @@ ifndef VERBOSE
VERBOSE = 0
endif
ifeq ("$(origin O)", "command line")
BUILD_OUTPUT := $(O)
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(shell pwd)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
#$(info Determined 'srctree' to be $(srctree))
endif
ifeq ($(BUILD_SRC),)
ifneq ($(BUILD_OUTPUT),)
define build_output
$(if $(VERBOSE:1=),@)$(MAKE) -C $(BUILD_OUTPUT) \
BUILD_SRC=$(CURDIR) -f $(CURDIR)/Makefile $1
endef
saved-output := $(BUILD_OUTPUT)
BUILD_OUTPUT := $(shell cd $(BUILD_OUTPUT) && /bin/pwd)
$(if $(BUILD_OUTPUT),, \
$(error output directory "$(saved-output)" does not exist))
all: sub-make
gui: force
$(call build_output, all_cmd)
$(filter-out gui,$(MAKECMDGOALS)): sub-make
sub-make: force
$(call build_output, $(MAKECMDGOALS))
# Leave processing to above invocation of make
skip-makefile := 1
endif # BUILD_OUTPUT
endif # BUILD_SRC
# We process the rest of the Makefile if this is the final invocation of make
ifeq ($(skip-makefile),)
srctree := $(realpath $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR)))
objtree := $(realpath $(CURDIR))
src := $(srctree)
obj := $(objtree)
export prefix libdir bindir src obj
# Shell quotes
libdir_SQ = $(subst ','\'',$(libdir))
bindir_SQ = $(subst ','\'',$(bindir))
LIB_FILE = liblockdep.a liblockdep.so.$(LIBLOCKDEP_VERSION)
LIB_IN := $(OUTPUT)liblockdep-in.o
BIN_FILE = lockdep
LIB_FILE = $(OUTPUT)liblockdep.a $(OUTPUT)liblockdep.so.$(LIBLOCKDEP_VERSION)
CONFIG_INCLUDES =
CONFIG_LIBS =
@ -108,33 +77,23 @@ INCLUDES = -I. -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES)
# Set compile option CFLAGS if not set elsewhere
CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
CFLAGS += -fPIC
override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
ifeq ($(VERBOSE),1)
Q =
print_compile =
print_app_build =
print_fpic_compile =
print_shared_lib_compile =
print_install =
else
Q = @
print_compile = echo ' CC '$(OBJ);
print_app_build = echo ' BUILD '$(OBJ);
print_fpic_compile = echo ' CC FPIC '$(OBJ);
print_shared_lib_compile = echo ' BUILD SHARED LIB '$(OBJ);
print_static_lib_build = echo ' BUILD STATIC LIB '$(OBJ);
print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
print_shared_lib_compile = echo ' LD '$(OBJ);
print_static_lib_build = echo ' LD '$(OBJ);
print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
endif
do_fpic_compile = \
($(print_fpic_compile) \
$(CC) -c $(CFLAGS) $(EXT) -fPIC $< -o $@)
do_app_build = \
($(print_app_build) \
$(CC) $^ -rdynamic -o $@ $(CONFIG_LIBS) $(LIBS))
export srctree OUTPUT CC LD CFLAGS V
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
do_compile_shared_library = \
($(print_shared_lib_compile) \
@ -144,22 +103,6 @@ do_build_static_lib = \
($(print_static_lib_build) \
$(RM) $@; $(AR) rcs $@ $^)
define do_compile
$(print_compile) \
$(CC) -c $(CFLAGS) $(EXT) $< -o $(obj)/$@;
endef
$(obj)/%.o: $(src)/%.c
$(Q)$(call do_compile)
%.o: $(src)/%.c
$(Q)$(call do_compile)
PEVENT_LIB_OBJS = common.o lockdep.o preload.o rbtree.o
ALL_OBJS = $(PEVENT_LIB_OBJS)
CMD_TARGETS = $(LIB_FILE)
TARGETS = $(CMD_TARGETS)
@ -169,42 +112,15 @@ all: all_cmd
all_cmd: $(CMD_TARGETS)
liblockdep.so.$(LIBLOCKDEP_VERSION): $(PEVENT_LIB_OBJS)
$(LIB_IN): force
$(Q)$(MAKE) $(build)=liblockdep
liblockdep.so.$(LIBLOCKDEP_VERSION): $(LIB_IN)
$(Q)$(do_compile_shared_library)
liblockdep.a: $(PEVENT_LIB_OBJS)
liblockdep.a: $(LIB_IN)
$(Q)$(do_build_static_lib)
$(PEVENT_LIB_OBJS): %.o: $(src)/%.c
$(Q)$(do_fpic_compile)
## make deps
all_objs := $(sort $(ALL_OBJS))
all_deps := $(all_objs:%.o=.%.d)
# let .d file also depends on the source and header files
define check_deps
@set -e; $(RM) $@; \
$(CC) -MM $(CFLAGS) $< > $@.$$$$; \
sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \
$(RM) $@.$$$$
endef
$(all_deps): .%.d: $(src)/%.c
$(Q)$(call check_deps)
$(all_objs) : %.o : .%.d
dep_includes := $(wildcard $(all_deps))
ifneq ($(dep_includes),)
include $(dep_includes)
endif
### Detect environment changes
TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):$(ARCH):$(CROSS_COMPILE)
tags: force
$(RM) tags
find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
@ -233,8 +149,6 @@ clean:
$(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d
$(RM) tags TAGS
endif # skip-makefile
PHONY += force
force:


@ -0,0 +1,17 @@
libtraceevent-y += event-parse.o
libtraceevent-y += event-plugin.o
libtraceevent-y += trace-seq.o
libtraceevent-y += parse-filter.o
libtraceevent-y += parse-utils.o
libtraceevent-y += kbuffer-parse.o
plugin_jbd2-y += plugin_jbd2.o
plugin_hrtimer-y += plugin_hrtimer.o
plugin_kmem-y += plugin_kmem.o
plugin_kvm-y += plugin_kvm.o
plugin_mac80211-y += plugin_mac80211.o
plugin_sched_switch-y += plugin_sched_switch.o
plugin_function-y += plugin_function.o
plugin_xen-y += plugin_xen.o
plugin_scsi-y += plugin_scsi.o
plugin_cfg80211-y += plugin_cfg80211.o


@ -67,7 +67,7 @@ PLUGIN_DIR = -DPLUGIN_DIR="$(plugin_dir)"
PLUGIN_DIR_SQ = '$(subst ','\'',$(PLUGIN_DIR))'
endif
include $(if $(BUILD_SRC),$(BUILD_SRC)/)../../scripts/Makefile.include
include ../../scripts/Makefile.include
# copy a bit from Linux kbuild
@ -78,40 +78,13 @@ ifndef VERBOSE
VERBOSE = 0
endif
ifeq ("$(origin O)", "command line")
BUILD_OUTPUT := $(O)
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(shell pwd)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
#$(info Determined 'srctree' to be $(srctree))
endif
ifeq ($(BUILD_SRC),)
ifneq ($(OUTPUT),)
define build_output
$(if $(VERBOSE:1=),@)+$(MAKE) -C $(OUTPUT) \
BUILD_SRC=$(CURDIR)/ -f $(CURDIR)/Makefile $1
endef
all: sub-make
$(MAKECMDGOALS): sub-make
sub-make: force
$(call build_output, $(MAKECMDGOALS))
# Leave processing to above invocation of make
skip-makefile := 1
endif # OUTPUT
endif # BUILD_SRC
# We process the rest of the Makefile if this is the final invocation of make
ifeq ($(skip-makefile),)
srctree := $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
objtree := $(CURDIR)
src := $(srctree)
obj := $(objtree)
export prefix bindir src obj
# Shell quotes
@ -132,16 +105,19 @@ EXTRAVERSION = $(EP_EXTRAVERSION)
OBJ = $@
N =
export Q VERBOSE
EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
INCLUDES = -I. -I $(srctree)/../../include $(CONFIG_INCLUDES)
INCLUDES = -I. -I $(srctree)/tools/include $(CONFIG_INCLUDES)
# Set compile option CFLAGS if not set elsewhere
CFLAGS ?= -g -Wall
# Set compile option CFLAGS
ifdef EXTRA_CFLAGS
CFLAGS := $(EXTRA_CFLAGS)
else
CFLAGS := -g -Wall
endif
# Append required CFLAGS
override CFLAGS += -fPIC
override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
override CFLAGS += $(udis86-flags) -D_GNU_SOURCE
@ -151,74 +127,58 @@ else
Q = @
endif
do_compile_shared_library = \
($(print_shared_lib_compile) \
$(CC) --shared $^ -o $@)
# Disable command line variables (CFLAGS) override from top
# level Makefile (perf), otherwise build Makefile will get
# the same command line setup.
MAKEOVERRIDES=
do_plugin_build = \
($(print_plugin_build) \
$(CC) $(CFLAGS) -shared -nostartfiles -o $@ $<)
export srctree OUTPUT CC LD CFLAGS V
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
do_build_static_lib = \
($(print_static_lib_build) \
$(RM) $@; $(AR) rcs $@ $^)
PLUGINS = plugin_jbd2.so
PLUGINS += plugin_hrtimer.so
PLUGINS += plugin_kmem.so
PLUGINS += plugin_kvm.so
PLUGINS += plugin_mac80211.so
PLUGINS += plugin_sched_switch.so
PLUGINS += plugin_function.so
PLUGINS += plugin_xen.so
PLUGINS += plugin_scsi.so
PLUGINS += plugin_cfg80211.so
PLUGINS := $(addprefix $(OUTPUT),$(PLUGINS))
PLUGINS_IN := $(PLUGINS:.so=-in.o)
do_compile = $(QUIET_CC)$(CC) -c $(CFLAGS) $(EXT) $< -o $(obj)/$@;
$(obj)/%.o: $(src)/%.c
$(call do_compile)
%.o: $(src)/%.c
$(call do_compile)
PEVENT_LIB_OBJS = event-parse.o
PEVENT_LIB_OBJS += event-plugin.o
PEVENT_LIB_OBJS += trace-seq.o
PEVENT_LIB_OBJS += parse-filter.o
PEVENT_LIB_OBJS += parse-utils.o
PEVENT_LIB_OBJS += kbuffer-parse.o
PLUGIN_OBJS = plugin_jbd2.o
PLUGIN_OBJS += plugin_hrtimer.o
PLUGIN_OBJS += plugin_kmem.o
PLUGIN_OBJS += plugin_kvm.o
PLUGIN_OBJS += plugin_mac80211.o
PLUGIN_OBJS += plugin_sched_switch.o
PLUGIN_OBJS += plugin_function.o
PLUGIN_OBJS += plugin_xen.o
PLUGIN_OBJS += plugin_scsi.o
PLUGIN_OBJS += plugin_cfg80211.o
PLUGINS := $(PLUGIN_OBJS:.o=.so)
ALL_OBJS = $(PEVENT_LIB_OBJS) $(PLUGIN_OBJS)
TE_IN := $(OUTPUT)libtraceevent-in.o
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
CMD_TARGETS = $(LIB_FILE) $(PLUGINS)
TARGETS = $(CMD_TARGETS)
all: all_cmd
all_cmd: $(CMD_TARGETS)
libtraceevent.so: $(PEVENT_LIB_OBJS)
$(TE_IN): force
$(Q)$(MAKE) $(build)=libtraceevent
$(OUTPUT)libtraceevent.so: $(TE_IN)
$(QUIET_LINK)$(CC) --shared $^ -o $@
libtraceevent.a: $(PEVENT_LIB_OBJS)
$(OUTPUT)libtraceevent.a: $(TE_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
plugins: $(PLUGINS)
$(PEVENT_LIB_OBJS): %.o: $(src)/%.c TRACEEVENT-CFLAGS
$(QUIET_CC_FPIC)$(CC) -c $(CFLAGS) $(EXT) -fPIC $< -o $@
__plugin_obj = $(notdir $@)
plugin_obj = $(__plugin_obj:-in.o=)
$(PLUGIN_OBJS): %.o : $(src)/%.c
$(QUIET_CC_FPIC)$(CC) -c $(CFLAGS) -fPIC -o $@ $<
$(PLUGINS_IN): force
$(Q)$(MAKE) $(build)=$(plugin_obj)
$(PLUGINS): %.so: %.o
$(QUIET_LINK)$(CC) $(CFLAGS) -shared -nostartfiles -o $@ $<
$(OUTPUT)%.so: $(OUTPUT)%-in.o
$(QUIET_LINK)$(CC) $(CFLAGS) -shared -nostartfiles -o $@ $^
define make_version.h
(echo '/* This file is automatically generated. Do not modify. */'; \
@ -255,40 +215,6 @@ define update_dir
fi);
endef
## make deps
all_objs := $(sort $(ALL_OBJS))
all_deps := $(all_objs:%.o=.%.d)
# let .d file also depends on the source and header files
define check_deps
@set -e; $(RM) $@; \
$(CC) -MM $(CFLAGS) $< > $@.$$$$; \
sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \
$(RM) $@.$$$$
endef
$(all_deps): .%.d: $(src)/%.c
$(Q)$(call check_deps)
$(all_objs) : %.o : .%.d
dep_includes := $(wildcard $(all_deps))
ifneq ($(dep_includes),)
include $(dep_includes)
endif
### Detect environment changes
TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):$(ARCH):$(CROSS_COMPILE)
TRACEEVENT-CFLAGS: force
@FLAGS='$(TRACK_CFLAGS)'; \
if test x"$$FLAGS" != x"`cat TRACEEVENT-CFLAGS 2>/dev/null`" ; then \
echo 1>&2 " FLAGS: * new build flags or cross compiler"; \
echo "$$FLAGS" >TRACEEVENT-CFLAGS; \
fi
tags: force
$(RM) tags
find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
@ -327,14 +253,9 @@ clean:
$(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d \
$(RM) TRACEEVENT-CFLAGS tags TAGS
endif # skip-makefile
PHONY += force plugins
force:
plugins:
@echo > /dev/null
# Declare the contents of the .PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
.PHONY: $(PHONY)


@ -304,7 +304,10 @@ int pevent_register_comm(struct pevent *pevent, const char *comm, int pid)
if (!item)
return -1;
item->comm = strdup(comm);
if (comm)
item->comm = strdup(comm);
else
item->comm = strdup("<...>");
if (!item->comm) {
free(item);
return -1;
@ -318,9 +321,14 @@ int pevent_register_comm(struct pevent *pevent, const char *comm, int pid)
return 0;
}
void pevent_register_trace_clock(struct pevent *pevent, char *trace_clock)
int pevent_register_trace_clock(struct pevent *pevent, const char *trace_clock)
{
pevent->trace_clock = trace_clock;
pevent->trace_clock = strdup(trace_clock);
if (!pevent->trace_clock) {
errno = ENOMEM;
return -1;
}
return 0;
}
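Because the clock string is now duplicated internally (and released in
pevent_free() below), callers may pass a temporary buffer and should check the
new return value; a hedged caller sketch (assumes an initialized 'pevent'
handle):

    /* sketch: the string is copied, so a stack buffer is fine; on
     * failure the call returns -1 with errno set to ENOMEM */
    if (pevent_register_trace_clock(pevent, "global") < 0)
        perror("pevent_register_trace_clock");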
struct func_map {
@ -758,6 +766,11 @@ static void free_arg(struct print_arg *arg)
free_arg(arg->hex.field);
free_arg(arg->hex.size);
break;
case PRINT_INT_ARRAY:
free_arg(arg->int_array.field);
free_arg(arg->int_array.count);
free_arg(arg->int_array.el_size);
break;
case PRINT_TYPE:
free(arg->typecast.type);
free_arg(arg->typecast.item);
@ -2014,6 +2027,38 @@ process_entry(struct event_format *event __maybe_unused, struct print_arg *arg,
return EVENT_ERROR;
}
static int alloc_and_process_delim(struct event_format *event, char *next_token,
struct print_arg **print_arg)
{
struct print_arg *field;
enum event_type type;
char *token;
int ret = 0;
field = alloc_arg();
if (!field) {
do_warning_event(event, "%s: not enough memory!", __func__);
errno = ENOMEM;
return -1;
}
type = process_arg(event, field, &token);
if (test_type_token(type, token, EVENT_DELIM, next_token)) {
errno = EINVAL;
ret = -1;
free_arg(field);
goto out_free_token;
}
*print_arg = field;
out_free_token:
free_token(token);
return ret;
}
static char *arg_eval (struct print_arg *arg);
static unsigned long long
@ -2486,49 +2531,46 @@ out_free:
static enum event_type
process_hex(struct event_format *event, struct print_arg *arg, char **tok)
{
struct print_arg *field;
enum event_type type;
char *token = NULL;
memset(arg, 0, sizeof(*arg));
arg->type = PRINT_HEX;
field = alloc_arg();
if (!field) {
do_warning_event(event, "%s: not enough memory!", __func__);
goto out_free;
}
if (alloc_and_process_delim(event, ",", &arg->hex.field))
goto out;
type = process_arg(event, field, &token);
if (alloc_and_process_delim(event, ")", &arg->hex.size))
goto free_field;
if (test_type_token(type, token, EVENT_DELIM, ","))
goto out_free;
return read_token_item(tok);
arg->hex.field = field;
free_field:
free_arg(arg->hex.field);
out:
*tok = NULL;
return EVENT_ERROR;
}
free_token(token);
static enum event_type
process_int_array(struct event_format *event, struct print_arg *arg, char **tok)
{
memset(arg, 0, sizeof(*arg));
arg->type = PRINT_INT_ARRAY;
field = alloc_arg();
if (!field) {
do_warning_event(event, "%s: not enough memory!", __func__);
*tok = NULL;
return EVENT_ERROR;
}
if (alloc_and_process_delim(event, ",", &arg->int_array.field))
goto out;
type = process_arg(event, field, &token);
if (alloc_and_process_delim(event, ",", &arg->int_array.count))
goto free_field;
if (test_type_token(type, token, EVENT_DELIM, ")"))
goto out_free;
if (alloc_and_process_delim(event, ")", &arg->int_array.el_size))
goto free_size;
arg->hex.size = field;
return read_token_item(tok);
free_token(token);
type = read_token_item(tok);
return type;
out_free:
free_arg(field);
free_token(token);
free_size:
free_arg(arg->int_array.count);
free_field:
free_arg(arg->int_array.field);
out:
*tok = NULL;
return EVENT_ERROR;
}
@ -2828,6 +2870,10 @@ process_function(struct event_format *event, struct print_arg *arg,
free_token(token);
return process_hex(event, arg, tok);
}
if (strcmp(token, "__print_array") == 0) {
free_token(token);
return process_int_array(event, arg, tok);
}
if (strcmp(token, "__get_str") == 0) {
free_token(token);
return process_str(event, arg, tok);
@ -3356,6 +3402,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
break;
case PRINT_FLAGS:
case PRINT_SYMBOL:
case PRINT_INT_ARRAY:
case PRINT_HEX:
break;
case PRINT_TYPE:
@ -3568,7 +3615,7 @@ static const struct flag flags[] = {
{ "HRTIMER_RESTART", 1 },
};
static unsigned long long eval_flag(const char *flag)
static long long eval_flag(const char *flag)
{
int i;
@ -3584,7 +3631,7 @@ static unsigned long long eval_flag(const char *flag)
if (strcmp(flags[i].name, flag) == 0)
return flags[i].value;
return 0;
return -1LL;
}
static void print_str_to_seq(struct trace_seq *s, const char *format,
@ -3658,7 +3705,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
struct print_flag_sym *flag;
struct format_field *field;
struct printk_map *printk;
unsigned long long val, fval;
long long val, fval;
unsigned long addr;
char *str;
unsigned char *hex;
@ -3717,11 +3764,11 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
print = 0;
for (flag = arg->flags.flags; flag; flag = flag->next) {
fval = eval_flag(flag->value);
if (!val && !fval) {
if (!val && fval < 0) {
print_str_to_seq(s, format, len_arg, flag->str);
break;
}
if (fval && (val & fval) == fval) {
if (fval > 0 && (val & fval) == fval) {
if (print && arg->flags.delim)
trace_seq_puts(s, arg->flags.delim);
print_str_to_seq(s, format, len_arg, flag->str);
@ -3766,6 +3813,54 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
}
break;
case PRINT_INT_ARRAY: {
void *num;
int el_size;
if (arg->int_array.field->type == PRINT_DYNAMIC_ARRAY) {
unsigned long offset;
struct format_field *field =
arg->int_array.field->dynarray.field;
offset = pevent_read_number(pevent,
data + field->offset,
field->size);
num = data + (offset & 0xffff);
} else {
field = arg->int_array.field->field.field;
if (!field) {
str = arg->int_array.field->field.name;
field = pevent_find_any_field(event, str);
if (!field)
goto out_warning_field;
arg->int_array.field->field.field = field;
}
num = data + field->offset;
}
len = eval_num_arg(data, size, event, arg->int_array.count);
el_size = eval_num_arg(data, size, event,
arg->int_array.el_size);
for (i = 0; i < len; i++) {
if (i)
trace_seq_putc(s, ' ');
if (el_size == 1) {
trace_seq_printf(s, "%u", *(uint8_t *)num);
} else if (el_size == 2) {
trace_seq_printf(s, "%u", *(uint16_t *)num);
} else if (el_size == 4) {
trace_seq_printf(s, "%u", *(uint32_t *)num);
} else if (el_size == 8) {
trace_seq_printf(s, "%lu", *(uint64_t *)num);
} else {
trace_seq_printf(s, "BAD SIZE:%d 0x%x",
el_size, *(uint8_t *)num);
el_size = 1;
}
num += el_size;
}
break;
}
case PRINT_TYPE:
break;
case PRINT_STRING: {
@ -3997,6 +4092,10 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
goto process_again;
case '.':
goto process_again;
case 'z':
case 'Z':
ls = 1;
goto process_again;
case 'p':
ls = 1;
/* fall through */
@ -4939,6 +5038,96 @@ const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid)
return comm;
}
static struct cmdline *
pid_from_cmdlist(struct pevent *pevent, const char *comm, struct cmdline *next)
{
struct cmdline_list *cmdlist = (struct cmdline_list *)next;
if (cmdlist)
cmdlist = cmdlist->next;
else
cmdlist = pevent->cmdlist;
while (cmdlist && strcmp(cmdlist->comm, comm) != 0)
cmdlist = cmdlist->next;
return (struct cmdline *)cmdlist;
}
/**
* pevent_data_pid_from_comm - return the pid from a given comm
* @pevent: a handle to the pevent
* @comm: the cmdline to find the pid from
* @next: the cmdline structure to find the next comm
*
* This returns the cmdline structure that holds a pid for a given
* comm, or NULL if none found. As there may be more than one pid for
* a given comm, the result of this call can be passed back into
* a recurring call in the @next parameter, and then it will find the
* next pid.
* Also, it does a linear search, so it may be slow.
*/
struct cmdline *pevent_data_pid_from_comm(struct pevent *pevent, const char *comm,
struct cmdline *next)
{
struct cmdline *cmdline;
/*
* If the cmdlines have not been converted yet, then use
* the list.
*/
if (!pevent->cmdlines)
return pid_from_cmdlist(pevent, comm, next);
if (next) {
/*
* The next pointer could have been still from
* a previous call before cmdlines were created
*/
if (next < pevent->cmdlines ||
next >= pevent->cmdlines + pevent->cmdline_count)
next = NULL;
else
cmdline = next + 1;
}
if (!next)
cmdline = pevent->cmdlines;
while (cmdline < pevent->cmdlines + pevent->cmdline_count) {
if (strcmp(cmdline->comm, comm) == 0)
return cmdline;
cmdline++;
}
return NULL;
}
/**
* pevent_cmdline_pid - return the pid associated to a given cmdline
* @cmdline: The cmdline structure to get the pid from
*
* Returns the pid for a given cmdline. If @cmdline is NULL, then
* -1 is returned.
*/
int pevent_cmdline_pid(struct pevent *pevent, struct cmdline *cmdline)
{
struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline;
if (!cmdline)
return -1;
/*
* If cmdlines have not been created yet, or cmdline is
* not part of the array, then treat it as a cmdlist instead.
*/
if (!pevent->cmdlines ||
cmdline < pevent->cmdlines ||
cmdline >= pevent->cmdlines + pevent->cmdline_count)
return cmdlist->pid;
return cmdline->pid;
}
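A minimal usage sketch of the two calls above (hypothetical caller code; it
assumes an initialized 'pevent' handle whose comms have been registered).
Further pids for the same comm can be fetched by passing the returned cmdline
back in as @next:

    #include <stdio.h>
    #include "event-parse.h"

    /* sketch: report the first pid recorded for a given comm */
    static void show_pid_for_comm(struct pevent *pevent, const char *comm)
    {
        struct cmdline *cmdline;

        cmdline = pevent_data_pid_from_comm(pevent, comm, NULL);
        if (cmdline)
            printf("%s: pid %d\n", comm,
                   pevent_cmdline_pid(pevent, cmdline));
    }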
/**
* pevent_data_comm_from_pid - parse the data into the print format
* @s: the trace_seq to write to
@ -5256,6 +5445,15 @@ static void print_args(struct print_arg *args)
print_args(args->hex.size);
printf(")");
break;
case PRINT_INT_ARRAY:
printf("__print_array(");
print_args(args->int_array.field);
printf(", ");
print_args(args->int_array.count);
printf(", ");
print_args(args->int_array.el_size);
printf(")");
break;
case PRINT_STRING:
case PRINT_BSTRING:
printf("__get_str(%s)", args->string.string);
@ -6228,15 +6426,20 @@ void pevent_ref(struct pevent *pevent)
pevent->ref_count++;
}
void pevent_free_format_field(struct format_field *field)
{
free(field->type);
free(field->name);
free(field);
}
static void free_format_fields(struct format_field *field)
{
struct format_field *next;
while (field) {
next = field->next;
free(field->type);
free(field->name);
free(field);
pevent_free_format_field(field);
field = next;
}
}
@ -6341,6 +6544,7 @@ void pevent_free(struct pevent *pevent)
free_handler(handle);
}
free(pevent->trace_clock);
free(pevent->events);
free(pevent->sort_events);
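For context, the PRINT_INT_ARRAY support added above parses the kernel's
__print_array(array, count, el_size) helper; a hypothetical event 'print fmt'
line (field names invented) would look like:

    print fmt: "samples=%s", __print_array(REC->vals, REC->nr_vals, 4)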


@ -22,6 +22,7 @@
#include <stdbool.h>
#include <stdarg.h>
#include <stdio.h>
#include <regex.h>
#include <string.h>
@ -91,6 +92,7 @@ extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
extern void trace_seq_terminate(struct trace_seq *s);
extern int trace_seq_do_fprintf(struct trace_seq *s, FILE *fp);
extern int trace_seq_do_printf(struct trace_seq *s);
@ -114,7 +116,7 @@ struct pevent_plugin_option {
char *name;
char *plugin_alias;
char *description;
char *value;
const char *value;
void *priv;
int set;
};
@ -152,6 +154,10 @@ struct pevent_plugin_option {
* .plugin_alias is used to give a shorter name to access
* the variable. Useful if a plugin handles more than one event.
*
* If .value is not set, then it is considered a boolean and only
* .set will be processed. If .value is defined, then it is considered
* a string option and .set will be ignored.
*
* PEVENT_PLUGIN_ALIAS: (optional)
* The name to use for finding options (uses filename if not defined)
*/
@ -245,6 +251,12 @@ struct print_arg_hex {
struct print_arg *size;
};
struct print_arg_int_array {
struct print_arg *field;
struct print_arg *count;
struct print_arg *el_size;
};
struct print_arg_dynarray {
struct format_field *field;
struct print_arg *index;
@ -273,6 +285,7 @@ enum print_arg_type {
PRINT_FLAGS,
PRINT_SYMBOL,
PRINT_HEX,
PRINT_INT_ARRAY,
PRINT_TYPE,
PRINT_STRING,
PRINT_BSTRING,
@ -292,6 +305,7 @@ struct print_arg {
struct print_arg_flags flags;
struct print_arg_symbol symbol;
struct print_arg_hex hex;
struct print_arg_int_array int_array;
struct print_arg_func func;
struct print_arg_string string;
struct print_arg_bitmask bitmask;
@ -597,7 +611,7 @@ enum trace_flag_type {
};
int pevent_register_comm(struct pevent *pevent, const char *comm, int pid);
void pevent_register_trace_clock(struct pevent *pevent, char *trace_clock);
int pevent_register_trace_clock(struct pevent *pevent, const char *trace_clock);
int pevent_register_function(struct pevent *pevent, char *name,
unsigned long long addr, char *mod);
int pevent_register_print_string(struct pevent *pevent, const char *fmt,
@ -617,6 +631,7 @@ enum pevent_errno pevent_parse_format(struct pevent *pevent,
const char *buf,
unsigned long size, const char *sys);
void pevent_free_format(struct event_format *event);
void pevent_free_format_field(struct format_field *field);
void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event,
const char *name, struct pevent_record *record,
@ -675,6 +690,11 @@ int pevent_data_type(struct pevent *pevent, struct pevent_record *rec);
struct event_format *pevent_data_event_from_type(struct pevent *pevent, int type);
int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec);
const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid);
struct cmdline;
struct cmdline *pevent_data_pid_from_comm(struct pevent *pevent, const char *comm,
struct cmdline *next);
int pevent_cmdline_pid(struct pevent *pevent, struct cmdline *cmdline);
void pevent_event_info(struct trace_seq *s, struct event_format *event,
struct pevent_record *record);
int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum,
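The new pevent_data_pid_from_comm()/pevent_cmdline_pid() pair lets a caller walk every pid mapped to one comm by feeding each result back in as @next. A minimal sketch, assuming an already-populated struct pevent handle:

	struct cmdline *cmdline = NULL;

	/* start from NULL, then continue the search from the previous hit */
	while ((cmdline = pevent_data_pid_from_comm(pevent, "bash", cmdline)))
		printf("pid %d has comm bash\n", pevent_cmdline_pid(pevent, cmdline));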

View File

@ -18,6 +18,7 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <dlfcn.h>
@ -49,6 +50,52 @@ struct plugin_list {
void *handle;
};
static void lower_case(char *str)
{
if (!str)
return;
for (; *str; str++)
*str = tolower(*str);
}
static int update_option_value(struct pevent_plugin_option *op, const char *val)
{
char *op_val;
if (!val) {
/* toggle, only if option is boolean */
if (op->value)
/* Warn? */
return 0;
op->set ^= 1;
return 0;
}
/*
* If the option has a value then it takes a string
* otherwise the option is a boolean.
*/
if (op->value) {
op->value = val;
return 0;
}
/* Option is boolean, must be either "1", "0", "true" or "false" */
op_val = strdup(val);
if (!op_val)
return -1;
lower_case(op_val);
/* compare the lower-cased copy so "True"/"FALSE" also match */
if (strcmp(op_val, "1") == 0 || strcmp(op_val, "true") == 0)
op->set = 1;
else if (strcmp(op_val, "0") == 0 || strcmp(op_val, "false") == 0)
op->set = 0;
free(op_val);
return 0;
}
/**
* traceevent_plugin_list_options - get list of plugin options
*
@ -120,6 +167,7 @@ update_option(const char *file, struct pevent_plugin_option *option)
{
struct trace_plugin_options *op;
char *plugin;
int ret = 0;
if (option->plugin_alias) {
plugin = strdup(option->plugin_alias);
@ -144,9 +192,10 @@ update_option(const char *file, struct pevent_plugin_option *option)
if (strcmp(op->option, option->name) != 0)
continue;
option->value = op->value;
option->set ^= 1;
goto out;
ret = update_option_value(option, op->value);
if (ret)
goto out;
break;
}
/* first look for unnamed options */
@ -156,14 +205,13 @@ update_option(const char *file, struct pevent_plugin_option *option)
if (strcmp(op->option, option->name) != 0)
continue;
option->value = op->value;
option->set ^= 1;
ret = update_option_value(option, op->value);
break;
}
out:
free(plugin);
return 0;
return ret;
}
/**

View File

@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
switch (type_len) {
case KBUFFER_TYPE_PADDING:
*length = read_4(kbuf, data);
data += *length;
break;
case KBUFFER_TYPE_TIME_EXTEND:
@ -730,3 +729,14 @@ void kbuffer_set_old_format(struct kbuffer *kbuf)
kbuf->next_event = __old_next_event;
}
/**
* kbuffer_start_of_data - return offset of where data starts on subbuffer
* @kbuf: The kbuffer
*
* Returns the location on the subbuffer where the data starts.
*/
int kbuffer_start_of_data(struct kbuffer *kbuf)
{
return kbuf->start;
}
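A minimal usage sketch for the new helper, assuming page points at one raw ftrace subbuffer on a little-endian, 64-bit machine:

	struct kbuffer *kbuf;
	unsigned long long ts;
	void *event;

	kbuf = kbuffer_alloc(KBUFFER_LSIZE_8, KBUFFER_ENDIAN_LITTLE);
	if (kbuf && kbuffer_load_subbuffer(kbuf, page) == 0) {
		/* offset of the first event, just past the subbuffer header */
		printf("data starts at offset %d\n", kbuffer_start_of_data(kbuf));

		for (event = kbuffer_read_event(kbuf, &ts); event;
		     event = kbuffer_next_event(kbuf, &ts))
			printf("event at %llu\n", ts);
	}
	kbuffer_free(kbuf);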

View File

@ -63,5 +63,6 @@ int kbuffer_missed_events(struct kbuffer *kbuf);
int kbuffer_subbuffer_size(struct kbuffer *kbuf);
void kbuffer_set_old_format(struct kbuffer *kbuf);
int kbuffer_start_of_data(struct kbuffer *kbuf);
#endif /* _K_BUFFER_H */

View File

@ -1058,6 +1058,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
*parg = current_op;
else
*parg = current_exp;
free(token);
return PEVENT_ERRNO__UNBALANCED_PAREN;
}
break;
@ -1168,6 +1169,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
*parg = current_op;
free(token);
return 0;
fail_alloc:

View File

@ -231,19 +231,24 @@ void trace_seq_terminate(struct trace_seq *s)
s->buffer[s->len] = 0;
}
int trace_seq_do_printf(struct trace_seq *s)
int trace_seq_do_fprintf(struct trace_seq *s, FILE *fp)
{
TRACE_SEQ_CHECK(s);
switch (s->state) {
case TRACE_SEQ__GOOD:
return printf("%.*s", s->len, s->buffer);
return fprintf(fp, "%.*s", s->len, s->buffer);
case TRACE_SEQ__BUFFER_POISONED:
puts("Usage of trace_seq after it was destroyed");
fprintf(fp, "%s\n", "Usage of trace_seq after it was destroyed");
break;
case TRACE_SEQ__MEM_ALLOC_FAILED:
puts("Can't allocate trace_seq buffer memory");
fprintf(fp, "%s\n", "Can't allocate trace_seq buffer memory");
break;
}
return -1;
}
int trace_seq_do_printf(struct trace_seq *s)
{
return trace_seq_do_fprintf(s, stdout);
}
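With the printf path now layered on top of trace_seq_do_fprintf(), a sequence can be sent to any stream, not just stdout. A minimal sketch (count is a caller-provided value):

	struct trace_seq s;

	trace_seq_init(&s);
	trace_seq_printf(&s, "processed %d events\n", count);
	trace_seq_do_fprintf(&s, stderr);	/* same output, but to stderr */
	trace_seq_destroy(&s);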

View File

@ -1,6 +1,7 @@
PERF-CFLAGS
PERF-GUI-VARS
PERF-VERSION-FILE
FEATURE-DUMP
perf
perf-read-vdso32
perf-read-vdsox32

tools/perf/Build Normal file
View File

@ -0,0 +1,44 @@
perf-y += builtin-bench.o
perf-y += builtin-annotate.o
perf-y += builtin-diff.o
perf-y += builtin-evlist.o
perf-y += builtin-help.o
perf-y += builtin-sched.o
perf-y += builtin-buildid-list.o
perf-y += builtin-buildid-cache.o
perf-y += builtin-list.o
perf-y += builtin-record.o
perf-y += builtin-report.o
perf-y += builtin-stat.o
perf-y += builtin-timechart.o
perf-y += builtin-top.o
perf-y += builtin-script.o
perf-y += builtin-kmem.o
perf-y += builtin-lock.o
perf-y += builtin-kvm.o
perf-y += builtin-inject.o
perf-y += builtin-mem.o
perf-y += builtin-data.o
perf-$(CONFIG_AUDIT) += builtin-trace.o
perf-$(CONFIG_LIBELF) += builtin-probe.o
perf-y += bench/
perf-y += tests/
perf-y += perf.o
paths += -DPERF_HTML_PATH="BUILD_STR($(htmldir_SQ))"
paths += -DPERF_INFO_PATH="BUILD_STR($(infodir_SQ))"
paths += -DPERF_MAN_PATH="BUILD_STR($(mandir_SQ))"
CFLAGS_builtin-help.o += $(paths)
CFLAGS_builtin-timechart.o += $(paths)
CFLAGS_perf.o += -DPERF_HTML_PATH="BUILD_STR($(htmldir_SQ))" -include $(OUTPUT)PERF-VERSION-FILE
libperf-y += util/
libperf-y += arch/
libperf-y += ui/
libperf-y += scripts/
gtk-y += ui/gtk/

View File

@ -0,0 +1,49 @@
1) perf build
=============
The perf build process consists of several separate building blocks,
which are linked together to form the perf binary:
- libperf library (static)
- perf builtin commands
- traceevent library (static)
- GTK ui library
Several makefiles govern the perf build:
- Makefile
the top-level Makefile, a wrapper that calls the main
Makefile.perf with a -j option to do parallel builds.
- Makefile.perf
the main makefile, which triggers the build of all perf objects,
including installation and documentation processing.
- tools/build/Makefile.build
main makefile of the build framework
- tools/build/Build.include
build framework generic definitions
- Build makefiles
makefiles that define the build objects
Please refer to tools/build/Documentation/Build.txt for more
information about the build framework.
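As a sketch, a Build file for a hypothetical new directory reuses the syntax shown in tools/perf/Build above:

	# always linked into perf-in.o
	perf-y += foo.o
	# built only when the feature checks set CONFIG_LIBELF
	perf-$(CONFIG_LIBELF) += elf-only.o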
2) perf build
=============
The Makefile.perf triggers the build framework for build objects:
perf, libperf, gtk
resulting in the following objects:
$ ls *-in.o
gtk-in.o libperf-in.o perf-in.o
Those objects are then used in final linking:
libperf-gtk.so <- gtk-in.o libperf-in.o
perf <- perf-in.o libperf-in.o
NOTE: this description omits the other libraries involved and focuses
only on the build framework outcomes.

View File

@ -12,9 +12,9 @@ SYNOPSIS
DESCRIPTION
-----------
This command manages the build-id cache. It can add and remove files to/from
the cache. In the future it should as well purge older entries, set upper
limits for the space used by the cache, etc.
This command manages the build-id cache. It can add, remove, update and purge
files to/from the cache. In the future it should also be able to set upper
limits for the space used by the cache, etc.
OPTIONS
-------
@ -36,14 +36,24 @@ OPTIONS
actually made.
-r::
--remove=::
Remove specified file from the cache.
Remove a cached binary which has the same build-id as the specified file
from the cache.
-p::
--purge=::
Purge all cached binaries, including older caches, which have the specified
path from the cache.
-M::
--missing=::
List missing build ids in the cache for the specified file.
-u::
--update::
Update specified file of the cache. It can be used to update kallsyms
kernel dso to vmlinux in order to support annotation.
--update=::
Update the specified file in the cache. Note that this doesn't remove
older entries, since those may still be needed for annotating old
(or remote) perf.data. Only if there is already a cached file with
exactly the same build-id is it replaced by the new one. It can be
used to update the kallsyms and kernel dso to vmlinux in order to
support annotation.
-v::
--verbose::
Be more verbose.

View File

@ -0,0 +1,40 @@
perf-data(1)
==============
NAME
----
perf-data - Data file related processing
SYNOPSIS
--------
[verse]
'perf data' [<common options>] <command> [<options>]
DESCRIPTION
-----------
Data file related processing.
COMMANDS
--------
convert::
Converts a perf data file into another format (only the CTF [1] format is supported for now).
It's possible to set the 'data-convert' debug variable to get debug messages from the
conversion, like:
perf --debug data-convert data convert ...
OPTIONS for 'convert'
---------------------
--to-ctf::
Triggers the CTF conversion; specify the path of the CTF data directory.
-i::
Specify input perf data file path.
-v::
--verbose::
Be more verbose (show counter open errors, etc).
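An example conversion (the file and directory names are examples) could look like:

	perf data convert --to-ctf=./ctf-data -i perf.data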
SEE ALSO
--------
linkperf:perf[1]
[1] Common Trace Format - http://www.efficios.com/ctf

View File

@ -20,12 +20,20 @@ If no parameters are passed it will assume perf.data.old and perf.data.
The differential profile is displayed only for events matching both
specified perf.data files.
If no parameters are passed the samples will be sorted by dso and symbol.
As the perf.data files could come from different binaries, the symbol addresses
could vary. So perf diff is based on the comparison of file and
symbol names.
OPTIONS
-------
-D::
--dump-raw-trace::
Dump raw trace in ASCII.
--kallsyms=<file>::
kallsyms pathname
-m::
--modules::
Load module symbols. WARNING: use only with -k and LIVE kernel

View File

@ -25,6 +25,10 @@ OPTIONS
--input=<file>::
Select the input file (default: perf.data unless stdin is a fifo)
-v::
--verbose::
Be more verbose (show symbol address, etc).
--caller::
Show per-callsite statistics

View File

@ -127,6 +127,12 @@ To limit the list use:
One or more types can be used at the same time, listing the events for the
types specified.
The raw format is supported:
. '--raw-dump', shows the raw dump of all the events.
. '--raw-dump [hw|sw|cache|tracepoint|pmu|event_glob]', shows the raw dump of
a certain kind of events.
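For example (a usage sketch), dumping only the hardware events in raw form:

	perf list --raw-dump hw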
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-top[1],

View File

@ -47,6 +47,12 @@ OPTIONS
-v::
--verbose::
Be more verbose (show parsed arguments, etc).
Cannot be used with -q.
-q::
--quiet::
Be quiet (do not show any messages including errors).
Cannot be used with -v.
-a::
--add=::
@ -96,7 +102,7 @@ OPTIONS
Dry run. With this option, --add and --del don't execute the actual
adding and removal operations.
--max-probes::
--max-probes=NUM::
Set the maximum number of probe points for an event. Default is 128.
-x::
@ -104,8 +110,13 @@ OPTIONS
Specify path to the executable or shared library file for user
space tracing. Can also be used with --funcs option.
--demangle::
Demangle application symbols. --no-demangle is also available
for disabling demangling.
--demangle-kernel::
Demangle kernel symbols.
Demangle kernel symbols. --no-demangle-kernel is also available
for disabling kernel demangling.
In the absence of the -m/-x options, perf probe checks if the first argument after
the options is an absolute path name. If it's an absolute path, perf probe
@ -137,6 +148,7 @@ Each probe argument follows below syntax.
[NAME=]LOCALVAR|$retval|%REG|@SYMBOL[:TYPE]
'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
The special argument '$vars' is also available for NAME; it is expanded to the local variables which can be accessed at the given probe point.
'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically sets the type based on debuginfo. You can specify the 'string' type only for a local variable or structure member which is an array of, or a pointer to, 'char' or 'unsigned char' type.
On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid.
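As a usage sketch, a probe with a typed argument (assuming do_sys_open is visible in the kernel's debuginfo):

	perf probe --add 'do_sys_open filename:string'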

View File

@ -55,6 +55,11 @@ OPTIONS
If you want to profile write accesses in [0x1000~1008), just set
'mem:0x1000/8:w'.
- a group of events surrounded by a pair of braces ("{event1,event2,...}").
The events are separated by commas, and the group should be quoted to
prevent shell interpretation. You also need to use --group on
"perf report" to view group events together; see the sketch below.
--filter=<filter>::
Event filter.
@ -62,9 +67,6 @@ OPTIONS
--all-cpus::
System-wide collection from all CPUs.
-l::
Scale counter values.
-p::
--pid=::
Record events on existing process ID (comma separated list).
@ -107,6 +109,10 @@ OPTIONS
specification with appended unit character - B/K/M/G. The
size is rounded up to the nearest power-of-two number of pages.
--group::
Put all events in a single event group. This precedes the --event
option and remains only for backward compatibility. See --event.
-g::
Enables call-graph (stack chain/backtrace) recording.
@ -115,13 +121,19 @@ OPTIONS
implies -g.
Allows specifying "fp" (frame pointer) or "dwarf"
(DWARF's CFI - Call Frame Information) as the method to collect
(DWARF's CFI - Call Frame Information) or "lbr"
(Hardware Last Branch Record facility) as the method to collect
the information used to show the call graphs.
On some systems, where binaries are built with gcc
-fomit-frame-pointer, using the "fp" method will produce bogus
call graphs; "dwarf", if available (perf tools linked to
the libunwind library), should be used instead.
Using the "lbr" method doesn't require any compiler options. It
will produce call graphs from the hardware LBR registers. The
main limition is that it is only available on new Intel
platforms, such as Haswell. It can only get user call chain. It
doesn't work with branch stack sampling at the same time.
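A usage sketch (requires a CPU with LBR support, e.g. Haswell; the workload is an example):

	perf record --call-graph lbr -- sleep 1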
-q::
--quiet::
@ -235,6 +247,9 @@ Capture machine state (registers) at interrupt, i.e., on counter overflows for
each sample. List of captured registers depends on the architecture. This option
is off by default.
--running-time::
Record running and enabled time for read events (:S)
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1]

Some files were not shown because too many files have changed in this diff.