Mirror of https://github.com/FEX-Emu/linux.git (synced 2024-12-13 20:33:15 +00:00)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (172 commits)
  perf_event, amd: Fix spinlock initialization
  perf_event: Fix preempt warning in perf_clock()
  perf tools: Flush maps on COMM events
  perf_events, x86: Split PMU definitions into separate files
  perf annotate: Handle samples not at objdump output addr boundaries
  perf_events, x86: Remove superflous MSR writes
  perf_events: Simplify code by removing cpu argument to hw_perf_group_sched_in()
  perf_events, x86: AMD event scheduling
  perf_events: Add new start/stop PMU callbacks
  perf_events: Report the MMAP pgoff value in bytes
  perf annotate: Defer allocating sym_priv->hist array
  perf symbols: Improve debugging information about symtab origins
  perf top: Use a macro instead of a constant variable
  perf symbols: Check the right return variable
  perf/scripts: Tag syscall_name helper as not yet available
  perf/scripts: Add perf-trace-python Documentation
  perf/scripts: Remove unnecessary PyTuple resizes
  perf/scripts: Add syscall tracing scripts
  perf/scripts: Add Python scripting engine
  perf/scripts: Remove check-perf-trace from listed scripts
  ...

Fix trivial conflict in tools/perf/util/probe-event.c
This commit is contained in: commit 6556a67435
@@ -24,6 +24,7 @@ Synopsis of kprobe_events
 -------------------------
  p[:[GRP/]EVENT] SYMBOL[+offs]|MEMADDR [FETCHARGS] : Set a probe
  r[:[GRP/]EVENT] SYMBOL[+0] [FETCHARGS] : Set a return probe
+ -:[GRP/]EVENT : Clear a probe
 
 GRP : Group name. If omitted, use "kprobes" for it.
 EVENT : Event name. If omitted, the event name is generated
@@ -37,15 +38,12 @@ Synopsis of kprobe_events
  @SYM[+|-offs] : Fetch memory at SYM +|- offs (SYM should be a data symbol)
  $stackN : Fetch Nth entry of stack (N >= 0)
  $stack : Fetch stack address.
- $argN : Fetch function argument. (N >= 0)(*)
- $retval : Fetch return value.(**)
- +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(***)
+ $retval : Fetch return value.(*)
+ +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
  NAME=FETCHARG: Set NAME as the argument name of FETCHARG.
 
-(*) aN may not correct on asmlinkaged functions and at the middle of
-function body.
-(**) only for return probe.
-(***) this is useful for fetching a field of data structures.
+(*) only for return probe.
+(**) this is useful for fetching a field of data structures.
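For instance, the memory-fetch forms compose with argument naming; a sketch (the probed symbol and stack index here are only illustrative):

  echo 'p:mystack do_sys_open ent0=$stack0 jif=@jiffies' >> /sys/kernel/debug/tracing/kprobe_events

This records the first stack entry and the current value of the jiffies data symbol each time do_sys_open() is hit.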
 Per-Probe Event Filtering
@@ -82,13 +80,16 @@ Usage examples
 To add a probe as a new event, write a new definition to kprobe_events
 as below.
 
-  echo p:myprobe do_sys_open dfd=$arg0 filename=$arg1 flags=$arg2 mode=$arg3 > /sys/kernel/debug/tracing/kprobe_events
+  echo 'p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)' > /sys/kernel/debug/tracing/kprobe_events
 
 This sets a kprobe on the top of do_sys_open() function with recording
-1st to 4th arguments as "myprobe" event. As this example shows, users can
-choose more familiar names for each arguments.
+1st to 4th arguments as "myprobe" event. Note, which register/stack entry is
+assigned to each function argument depends on arch-specific ABI. If you unsure
+the ABI, please try to use probe subcommand of perf-tools (you can find it
+under tools/perf/).
+As this example shows, users can choose more familiar names for each arguments.
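For example, with debuginfo available, something like the following sketch should define an equivalent probe without knowing the ABI (exact options vary by perf version):

  perf probe --add 'do_sys_open dfd filename flags mode'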
-  echo r:myretprobe do_sys_open $retval >> /sys/kernel/debug/tracing/kprobe_events
+  echo 'r:myretprobe do_sys_open $retval' >> /sys/kernel/debug/tracing/kprobe_events
 
 This sets a kretprobe on the return point of do_sys_open() function with
 recording return value as "myretprobe" event.
@@ -97,23 +98,24 @@ recording return value as "myretprobe" event.
 
   cat /sys/kernel/debug/tracing/events/kprobes/myprobe/format
 name: myprobe
-ID: 75
+ID: 780
 format:
-    field:unsigned short common_type; offset:0; size:2;
-    field:unsigned char common_flags; offset:2; size:1;
-    field:unsigned char common_preempt_count; offset:3; size:1;
-    field:int common_pid; offset:4; size:4;
-    field:int common_tgid; offset:8; size:4;
+    field:unsigned short common_type; offset:0; size:2; signed:0;
+    field:unsigned char common_flags; offset:2; size:1; signed:0;
+    field:unsigned char common_preempt_count; offset:3; size:1;signed:0;
+    field:int common_pid; offset:4; size:4; signed:1;
+    field:int common_lock_depth; offset:8; size:4; signed:1;
 
-    field: unsigned long ip; offset:16;tsize:8;
-    field: int nargs; offset:24;tsize:4;
-    field: unsigned long dfd; offset:32;tsize:8;
-    field: unsigned long filename; offset:40;tsize:8;
-    field: unsigned long flags; offset:48;tsize:8;
-    field: unsigned long mode; offset:56;tsize:8;
+    field:unsigned long __probe_ip; offset:12; size:4; signed:0;
+    field:int __probe_nargs; offset:16; size:4; signed:1;
+    field:unsigned long dfd; offset:20; size:4; signed:0;
+    field:unsigned long filename; offset:24; size:4; signed:0;
+    field:unsigned long flags; offset:28; size:4; signed:0;
+    field:unsigned long mode; offset:32; size:4; signed:0;
 
-print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->ip, REC->dfd, REC->filename, REC->flags, REC->mode
+print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->__probe_ip,
+REC->dfd, REC->filename, REC->flags, REC->mode
 
 You can see that the event has 4 arguments as in the expressions you specified.
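The currently defined probes can be listed back at any time:

  cat /sys/kernel/debug/tracing/kprobe_events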
@@ -121,6 +123,12 @@ print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->ip, REC->dfd, R
 
 This clears all probe points.
 
+Or,
+
+  echo -:myprobe >> kprobe_events
+
+This clears probe points selectively.
+
 Right after definition, each event is disabled by default. For tracing these
 events, you need to enable it.
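Assuming the probes defined above (the debugfs mount point may differ):

  echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
  echo 1 > /sys/kernel/debug/tracing/events/kprobes/myretprobe/enable
  cat /sys/kernel/debug/tracing/trace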
@@ -146,4 +154,3 @@ events, you need to enable it.
 returns from SYMBOL(e.g. "sys_open+0x1b/0x1d <- do_sys_open" means kernel
 returns from do_sys_open to sys_open+0x1b).
@@ -870,7 +870,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
     return 1;
 
 ss_probe:
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
+#if !defined(CONFIG_PREEMPT)
     if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
         /* Boost up -- we can execute copied instructions directly */
         ia64_psr(regs)->ri = p->ainsn.slot;
@@ -495,9 +495,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
     entry->nr = 0;
 
-    if (current->pid == 0) /* idle task? */
-        return entry;
-
     if (!user_mode(regs)) {
         perf_callchain_kernel(regs, entry);
         if (current->mm)
@@ -718,10 +718,10 @@ static int collect_events(struct perf_event *group, int max_count,
     return n;
 }
 
-static void event_sched_in(struct perf_event *event, int cpu)
+static void event_sched_in(struct perf_event *event)
 {
     event->state = PERF_EVENT_STATE_ACTIVE;
-    event->oncpu = cpu;
+    event->oncpu = smp_processor_id();
     event->tstamp_running += event->ctx->time - event->tstamp_stopped;
     if (is_software_event(event))
         event->pmu->enable(event);
@@ -735,7 +735,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
  */
 int hw_perf_group_sched_in(struct perf_event *group_leader,
                struct perf_cpu_context *cpuctx,
-               struct perf_event_context *ctx, int cpu)
+               struct perf_event_context *ctx)
 {
     struct cpu_hw_events *cpuhw;
     long i, n, n0;
@@ -766,10 +766,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
         cpuhw->event[i]->hw.config = cpuhw->events[i];
     cpuctx->active_oncpu += n;
     n = 1;
-    event_sched_in(group_leader, cpu);
+    event_sched_in(group_leader);
     list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
         if (sub->state != PERF_EVENT_STATE_OFF) {
-            event_sched_in(sub, cpu);
+            event_sched_in(sub);
             ++n;
         }
     }
@@ -68,9 +68,6 @@ perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
     is_user = user_mode(regs);
 
-    if (!current || current->pid == 0)
-        return;
-
     if (is_user && current->state != TASK_RUNNING)
         return;
@@ -980,10 +980,10 @@ static int collect_events(struct perf_event *group, int max_count,
     return n;
 }
 
-static void event_sched_in(struct perf_event *event, int cpu)
+static void event_sched_in(struct perf_event *event)
 {
     event->state = PERF_EVENT_STATE_ACTIVE;
-    event->oncpu = cpu;
+    event->oncpu = smp_processor_id();
     event->tstamp_running += event->ctx->time - event->tstamp_stopped;
     if (is_software_event(event))
         event->pmu->enable(event);
@@ -991,7 +991,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
 
 int hw_perf_group_sched_in(struct perf_event *group_leader,
                struct perf_cpu_context *cpuctx,
-               struct perf_event_context *ctx, int cpu)
+               struct perf_event_context *ctx)
 {
     struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
     struct perf_event *sub;
@@ -1015,10 +1015,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
 
     cpuctx->active_oncpu += n;
     n = 1;
-    event_sched_in(group_leader, cpu);
+    event_sched_in(group_leader);
     list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
         if (sub->state != PERF_EVENT_STATE_OFF) {
-            event_sched_in(sub, cpu);
+            event_sched_in(sub);
             n++;
         }
     }
@@ -65,12 +65,17 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
                     void *text, void *text_end);
 extern void alternatives_smp_module_del(struct module *mod);
 extern void alternatives_smp_switch(int smp);
+extern int alternatives_text_reserved(void *start, void *end);
 #else
 static inline void alternatives_smp_module_add(struct module *mod, char *name,
                     void *locks, void *locks_end,
                     void *text, void *text_end) {}
 static inline void alternatives_smp_module_del(struct module *mod) {}
 static inline void alternatives_smp_switch(int smp) {}
+static inline int alternatives_text_reserved(void *start, void *end)
+{
+    return 0;
+}
 #endif /* CONFIG_SMP */
 
 /* alternative assembly primitive: */
@@ -14,6 +14,9 @@
    which debugging register was responsible for the trap. The other bits
    are either reserved or not of interest to us. */
 
+/* Define reserved bits in DR6 which are always set to 1 */
+#define DR6_RESERVED (0xFFFF0FF0)
+
 #define DR_TRAP0 (0x1) /* db0 */
 #define DR_TRAP1 (0x2) /* db1 */
 #define DR_TRAP2 (0x4) /* db2 */
@@ -19,7 +19,6 @@ extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
 extern int check_nmi_watchdog(void);
 extern int nmi_watchdog_enabled;
 extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
-extern int avail_to_resrv_perfctr_nmi(unsigned int);
 extern int reserve_perfctr_nmi(unsigned int);
 extern void release_perfctr_nmi(unsigned int);
 extern int reserve_evntsel_nmi(unsigned int);
@@ -27,7 +27,14 @@
 /*
  * Includes eventsel and unit mask as well:
  */
-#define ARCH_PERFMON_EVENT_MASK 0xffff
+
+#define INTEL_ARCH_EVTSEL_MASK  0x000000FFULL
+#define INTEL_ARCH_UNIT_MASK    0x0000FF00ULL
+#define INTEL_ARCH_EDGE_MASK    0x00040000ULL
+#define INTEL_ARCH_INV_MASK     0x00800000ULL
+#define INTEL_ARCH_CNT_MASK     0xFF000000ULL
+#define INTEL_ARCH_EVENT_MASK   (INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)
 
 /*
  * filter mask to validate fixed counter events.
@@ -38,7 +45,12 @@
  * The other filters are supported by fixed counters.
  * The any-thread option is supported starting with v3.
  */
-#define ARCH_PERFMON_EVENT_FILTER_MASK 0xff840000
+#define INTEL_ARCH_FIXED_MASK \
+    (INTEL_ARCH_CNT_MASK| \
+     INTEL_ARCH_INV_MASK| \
+     INTEL_ARCH_EDGE_MASK|\
+     INTEL_ARCH_UNIT_MASK|\
+     INTEL_ARCH_EVTSEL_MASK)
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
@@ -274,10 +274,6 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
         return 0;
 }
 
-/* Get Nth argument at function call */
-extern unsigned long regs_get_argument_nth(struct pt_regs *regs,
-                       unsigned int n);
-
 /*
  * These are defined as per linux/ptrace.h, which see.
  */
@@ -3,8 +3,6 @@
 
 extern int kstack_depth_to_print;
 
-int x86_is_stack_id(int id, char *name);
-
 struct thread_info;
 struct stacktrace_ops;
@@ -390,6 +390,24 @@ void alternatives_smp_switch(int smp)
     mutex_unlock(&smp_alt);
 }
 
+/* Return 1 if the address range is reserved for smp-alternatives */
+int alternatives_text_reserved(void *start, void *end)
+{
+    struct smp_alt_module *mod;
+    u8 **ptr;
+    u8 *text_start = start;
+    u8 *text_end = end;
+
+    list_for_each_entry(mod, &smp_alt_modules, next) {
+        if (mod->text > text_end || mod->text_end < text_start)
+            continue;
+        for (ptr = mod->locks; ptr < mod->locks_end; ptr++)
+            if (text_start <= *ptr && text_end >= *ptr)
+                return 1;
+    }
+
+    return 0;
+}
 #endif
 
 #ifdef CONFIG_PARAVIRT
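A caller would use this as a guard before patching or probing such a range; roughly (illustrative only — the actual call sites are not part of this excerpt):

    if (alternatives_text_reserved(addr, addr + len - 1))
        return -EINVAL; /* range owned by SMP alternatives */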
File diff suppressed because it is too large.

arch/x86/kernel/cpu/perf_event_amd.c (new file, 416 lines)
@@ -0,0 +1,416 @@
#ifdef CONFIG_CPU_SUP_AMD

static DEFINE_RAW_SPINLOCK(amd_nb_lock);

static __initconst u64 amd_hw_cache_event_ids
                [PERF_COUNT_HW_CACHE_MAX]
                [PERF_COUNT_HW_CACHE_OP_MAX]
                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
        [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
        [ C(RESULT_MISS) ] = 0,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
        [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
    },
 },
 [ C(L1I ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
        [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
        [ C(RESULT_MISS) ] = 0,
    },
 },
 [ C(LL ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
        [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
        [ C(RESULT_MISS) ] = 0,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS) ] = 0,
    },
 },
 [ C(DTLB) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
        [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS) ] = 0,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS) ] = 0,
    },
 },
 [ C(ITLB) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes */
        [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
 },
 [ C(BPU ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
        [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
    [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
    [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
    [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
    [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
    [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
    [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
};

static u64 amd_pmu_event_map(int hw_event)
{
    return amd_perfmon_event_map[hw_event];
}

static u64 amd_pmu_raw_event(u64 hw_event)
{
#define K7_EVNTSEL_EVENT_MASK 0xF000000FFULL
#define K7_EVNTSEL_UNIT_MASK  0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK  0x000040000ULL
#define K7_EVNTSEL_INV_MASK   0x000800000ULL
#define K7_EVNTSEL_REG_MASK   0x0FF000000ULL

#define K7_EVNTSEL_MASK \
    (K7_EVNTSEL_EVENT_MASK | \
     K7_EVNTSEL_UNIT_MASK | \
     K7_EVNTSEL_EDGE_MASK | \
     K7_EVNTSEL_INV_MASK | \
     K7_EVNTSEL_REG_MASK)

    return hw_event & K7_EVNTSEL_MASK;
}

/*
 * AMD64 events are detected based on their event codes.
 */
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
    return (hwc->config & 0xe0) == 0xe0;
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
                      struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    struct amd_nb *nb = cpuc->amd_nb;
    int i;

    /*
     * only care about NB events
     */
    if (!(nb && amd_is_nb_event(hwc)))
        return;

    /*
     * need to scan whole list because event may not have
     * been assigned during scheduling
     *
     * no race condition possible because event can only
     * be removed on one CPU at a time AND PMU is disabled
     * when we come here
     */
    for (i = 0; i < x86_pmu.num_events; i++) {
        if (nb->owners[i] == event) {
            cmpxchg(nb->owners+i, event, NULL);
            break;
        }
    }
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, Hypertransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBride which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached the same NB compete for the same
 * counters to host NB events, this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non NB events are not impacted by this restriction.
 */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    struct amd_nb *nb = cpuc->amd_nb;
    struct perf_event *old = NULL;
    int max = x86_pmu.num_events;
    int i, j, k = -1;

    /*
     * if not NB event or no NB, then no constraints
     */
    if (!(nb && amd_is_nb_event(hwc)))
        return &unconstrained;

    /*
     * detect if already present, if so reuse
     *
     * cannot merge with actual allocation
     * because of possible holes
     *
     * event can already be present yet not assigned (in hwc->idx)
     * because of successive calls to x86_schedule_events() from
     * hw_perf_group_sched_in() without hw_perf_enable()
     */
    for (i = 0; i < max; i++) {
        /*
         * keep track of first free slot
         */
        if (k == -1 && !nb->owners[i])
            k = i;

        /* already present, reuse */
        if (nb->owners[i] == event)
            goto done;
    }
    /*
     * not present, so grab a new slot
     * starting either at:
     */
    if (hwc->idx != -1) {
        /* previous assignment */
        i = hwc->idx;
    } else if (k != -1) {
        /* start from free slot found */
        i = k;
    } else {
        /*
         * event not found, no slot found in
         * first pass, try again from the
         * beginning
         */
        i = 0;
    }
    j = i;
    do {
        old = cmpxchg(nb->owners+i, NULL, event);
        if (!old)
            break;
        if (++i == max)
            i = 0;
    } while (i != j);
done:
    if (!old)
        return &nb->event_constraints[i];

    return &emptyconstraint;
}
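The claim/release protocol above is compact; as a standalone illustration, here is a minimal userspace sketch of the same lock-free slot-claiming pattern (NUM_SLOTS, owners[] and claim_slot() are illustrative stand-ins for the per-NB table and the kernel's cmpxchg(), not kernel API):

#include <stdatomic.h>
#include <stddef.h>

#define NUM_SLOTS 4

/* shared ownership table: one entry per hardware counter */
static _Atomic(void *) owners[NUM_SLOTS];

/*
 * Try to claim a slot for 'event', starting the search at 'start'
 * (a previous assignment or the first free index, as above).
 * Returns the slot index, or -1 when every slot is owned and
 * scheduling must fail.
 */
static int claim_slot(void *event, int start)
{
    int i = start;

    do {
        void *expected = NULL;

        /* atomically take the slot only if it is still unowned */
        if (atomic_compare_exchange_strong(&owners[i], &expected, event))
            return i;
        if (expected == event)
            return i; /* already ours: reuse, as the first-pass scan does */
        if (++i == NUM_SLOTS)
            i = 0;
    } while (i != start);

    return -1;
}

The kernel version does the reuse check in a separate first pass because successive x86_schedule_events() calls can run before hw_perf_enable(), exactly as the comment in amd_get_event_constraints() notes.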
static __initconst struct x86_pmu amd_pmu = {
    .name = "AMD",
    .handle_irq = x86_pmu_handle_irq,
    .disable_all = x86_pmu_disable_all,
    .enable_all = x86_pmu_enable_all,
    .enable = x86_pmu_enable_event,
    .disable = x86_pmu_disable_event,
    .eventsel = MSR_K7_EVNTSEL0,
    .perfctr = MSR_K7_PERFCTR0,
    .event_map = amd_pmu_event_map,
    .raw_event = amd_pmu_raw_event,
    .max_events = ARRAY_SIZE(amd_perfmon_event_map),
    .num_events = 4,
    .event_bits = 48,
    .event_mask = (1ULL << 48) - 1,
    .apic = 1,
    /* use highest bit to detect overflow */
    .max_period = (1ULL << 47) - 1,
    .get_event_constraints = amd_get_event_constraints,
    .put_event_constraints = amd_put_event_constraints
};

static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
{
    struct amd_nb *nb;
    int i;

    nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
    if (!nb)
        return NULL;

    memset(nb, 0, sizeof(*nb));
    nb->nb_id = nb_id;

    /*
     * initialize all possible NB constraints
     */
    for (i = 0; i < x86_pmu.num_events; i++) {
        set_bit(i, nb->event_constraints[i].idxmsk);
        nb->event_constraints[i].weight = 1;
    }
    return nb;
}

static void amd_pmu_cpu_online(int cpu)
{
    struct cpu_hw_events *cpu1, *cpu2;
    struct amd_nb *nb = NULL;
    int i, nb_id;

    if (boot_cpu_data.x86_max_cores < 2)
        return;

    /*
     * function may be called too early in the
     * boot process, in which case nb_id is bogus
     */
    nb_id = amd_get_nb_id(cpu);
    if (nb_id == BAD_APICID)
        return;

    cpu1 = &per_cpu(cpu_hw_events, cpu);
    cpu1->amd_nb = NULL;

    raw_spin_lock(&amd_nb_lock);

    for_each_online_cpu(i) {
        cpu2 = &per_cpu(cpu_hw_events, i);
        nb = cpu2->amd_nb;
        if (!nb)
            continue;
        if (nb->nb_id == nb_id)
            goto found;
    }

    nb = amd_alloc_nb(cpu, nb_id);
    if (!nb) {
        pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
        raw_spin_unlock(&amd_nb_lock);
        return;
    }
found:
    nb->refcnt++;
    cpu1->amd_nb = nb;

    raw_spin_unlock(&amd_nb_lock);
}

static void amd_pmu_cpu_offline(int cpu)
{
    struct cpu_hw_events *cpuhw;

    if (boot_cpu_data.x86_max_cores < 2)
        return;

    cpuhw = &per_cpu(cpu_hw_events, cpu);

    raw_spin_lock(&amd_nb_lock);

    if (--cpuhw->amd_nb->refcnt == 0)
        kfree(cpuhw->amd_nb);

    cpuhw->amd_nb = NULL;

    raw_spin_unlock(&amd_nb_lock);
}

static __init int amd_pmu_init(void)
{
    /* Performance-monitoring supported from K7 and later: */
    if (boot_cpu_data.x86 < 6)
        return -ENODEV;

    x86_pmu = amd_pmu;

    /* Events are common for all AMDs */
    memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
           sizeof(hw_cache_event_ids));

    /*
     * explicitly initialize the boot cpu, other cpus will get
     * the cpu hotplug callbacks from smp_init()
     */
    amd_pmu_cpu_online(smp_processor_id());
    return 0;
}

#else /* CONFIG_CPU_SUP_AMD */

static int amd_pmu_init(void)
{
    return 0;
}

static void amd_pmu_cpu_online(int cpu)
{
}

static void amd_pmu_cpu_offline(int cpu)
{
}

#endif
arch/x86/kernel/cpu/perf_event_intel.c (new file, 971 lines)
@@ -0,0 +1,971 @@
#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
    [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
    [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
    [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
    [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
    [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
    [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
    [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
};

static struct event_constraint intel_core_event_constraints[] =
{
    INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
    INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
    INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
    INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
    INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
    INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
    EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] =
{
    FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
    FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
    INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
    INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
    INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
    INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
    INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
    INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
    INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
    INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
    INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
    EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
    FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
    FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
    INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
    INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
    INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
    INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
    INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
    INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
    INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
    INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
    EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
    FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
    FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
    INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
    INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
    INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
    EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
    FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
    FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
    EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
    return intel_perfmon_event_map[hw_event];
}

static __initconst u64 westmere_hw_cache_event_ids
                [PERF_COUNT_HW_CACHE_MAX]
                [PERF_COUNT_HW_CACHE_OP_MAX]
                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
        [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
        [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
        [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
    },
 },
 [ C(L1I ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
        [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS) ] = 0x0,
    },
 },
 [ C(LL ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
        [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
        [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
        [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
    },
 },
 [ C(DTLB) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
        [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
        [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS) ] = 0x0,
    },
 },
 [ C(ITLB) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
        [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
 },
 [ C(BPU ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
        [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
 },
};

static __initconst u64 nehalem_hw_cache_event_ids
                [PERF_COUNT_HW_CACHE_MAX]
                [PERF_COUNT_HW_CACHE_OP_MAX]
                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
        [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
        [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
        [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
    },
 },
 [ C(L1I ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
        [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS) ] = 0x0,
    },
 },
 [ C(LL ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
        [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
        [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
        [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
    },
 },
 [ C(DTLB) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
        [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
        [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS) ] = 0x0,
    },
 },
 [ C(ITLB) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
        [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
 },
 [ C(BPU ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
        [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
 },
};

static __initconst u64 core2_hw_cache_event_ids
                [PERF_COUNT_HW_CACHE_MAX]
                [PERF_COUNT_HW_CACHE_OP_MAX]
                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
        [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
        [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
        [ C(RESULT_MISS) ] = 0,
    },
 },
 [ C(L1I ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
        [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS) ] = 0,
    },
 },
 [ C(LL ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
        [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
        [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS) ] = 0,
    },
 },
 [ C(DTLB) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
        [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
        [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS) ] = 0,
    },
 },
 [ C(ITLB) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
        [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
 },
 [ C(BPU ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
        [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
 },
};
static __initconst u64 atom_hw_cache_event_ids
                [PERF_COUNT_HW_CACHE_MAX]
                [PERF_COUNT_HW_CACHE_OP_MAX]
                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
        [ C(RESULT_MISS) ] = 0,
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
        [ C(RESULT_MISS) ] = 0,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0x0,
        [ C(RESULT_MISS) ] = 0,
    },
 },
 [ C(L1I ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
        [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS) ] = 0,
    },
 },
 [ C(LL ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
        [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
        [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS) ] = 0,
    },
 },
 [ C(DTLB) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
        [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
        [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = 0,
        [ C(RESULT_MISS) ] = 0,
    },
 },
 [ C(ITLB) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
        [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
 },
 [ C(BPU ) ] = {
    [ C(OP_READ) ] = {
        [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
        [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
    },
    [ C(OP_WRITE) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
    [ C(OP_PREFETCH) ] = {
        [ C(RESULT_ACCESS) ] = -1,
        [ C(RESULT_MISS) ] = -1,
    },
 },
};
static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK  0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK  0x00040000ULL
#define CORE_EVNTSEL_INV_MASK   0x00800000ULL
#define CORE_EVNTSEL_REG_MASK   0xFF000000ULL

#define CORE_EVNTSEL_MASK \
    (INTEL_ARCH_EVTSEL_MASK | \
     INTEL_ARCH_UNIT_MASK | \
     INTEL_ARCH_EDGE_MASK | \
     INTEL_ARCH_INV_MASK | \
     INTEL_ARCH_CNT_MASK)

    return hw_event & CORE_EVNTSEL_MASK;
}

static void intel_pmu_enable_bts(u64 config)
{
    unsigned long debugctlmsr;

    debugctlmsr = get_debugctlmsr();

    debugctlmsr |= X86_DEBUGCTL_TR;
    debugctlmsr |= X86_DEBUGCTL_BTS;
    debugctlmsr |= X86_DEBUGCTL_BTINT;

    if (!(config & ARCH_PERFMON_EVENTSEL_OS))
        debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

    if (!(config & ARCH_PERFMON_EVENTSEL_USR))
        debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

    update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
    unsigned long debugctlmsr;

    if (!cpuc->ds)
        return;

    debugctlmsr = get_debugctlmsr();

    debugctlmsr &=
        ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
          X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

    update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_all(void)
{
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

    if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
        intel_pmu_disable_bts();
}

static void intel_pmu_enable_all(void)
{
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

    if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
        struct perf_event *event =
            cpuc->events[X86_PMC_IDX_FIXED_BTS];

        if (WARN_ON_ONCE(!event))
            return;

        intel_pmu_enable_bts(event->hw.config);
    }
}

static inline u64 intel_pmu_get_status(void)
{
    u64 status;

    rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

    return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
    wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
{
    int idx = __idx - X86_PMC_IDX_FIXED;
    u64 ctrl_val, mask;

    mask = 0xfULL << (idx * 4);

    rdmsrl(hwc->config_base, ctrl_val);
    ctrl_val &= ~mask;
    (void)checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_drain_bts_buffer(void)
{
    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
    struct debug_store *ds = cpuc->ds;
    struct bts_record {
        u64 from;
        u64 to;
        u64 flags;
    };
    struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
    struct bts_record *at, *top;
    struct perf_output_handle handle;
    struct perf_event_header header;
    struct perf_sample_data data;
    struct pt_regs regs;

    if (!event)
        return;

    if (!ds)
        return;

    at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
    top = (struct bts_record *)(unsigned long)ds->bts_index;

    if (top <= at)
        return;

    ds->bts_index = ds->bts_buffer_base;

    data.period = event->hw.last_period;
    data.addr = 0;
    data.raw = NULL;
    regs.ip = 0;

    /*
     * Prepare a generic sample, i.e. fill in the invariant fields.
     * We will overwrite the from and to address before we output
     * the sample.
     */
    perf_prepare_sample(&header, &data, event, &regs);

    if (perf_output_begin(&handle, event,
                  header.size * (top - at), 1, 1))
        return;

    for (; at < top; at++) {
        data.ip = at->from;
        data.addr = at->to;

        perf_output_sample(&handle, &header, &data, event);
    }

    perf_output_end(&handle);

    /* There's new data available. */
    event->hw.interrupts++;
    event->pending_kill = POLL_IN;
}

static inline void
intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
    if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
        intel_pmu_disable_bts();
        intel_pmu_drain_bts_buffer();
        return;
    }

    if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
        intel_pmu_disable_fixed(hwc, idx);
        return;
    }

    x86_pmu_disable_event(hwc, idx);
}

static inline void
intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
{
    int idx = __idx - X86_PMC_IDX_FIXED;
    u64 ctrl_val, bits, mask;
    int err;

    /*
     * Enable IRQ generation (0x8),
     * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
     * if requested:
     */
    bits = 0x8ULL;
    if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
        bits |= 0x2;
    if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
        bits |= 0x1;

    /*
     * ANY bit is supported in v3 and up
     */
    if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
        bits |= 0x4;

    bits <<= (idx * 4);
    mask = 0xfULL << (idx * 4);

    rdmsrl(hwc->config_base, ctrl_val);
    ctrl_val &= ~mask;
    ctrl_val |= bits;
    err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
    if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
        if (!__get_cpu_var(cpu_hw_events).enabled)
            return;

        intel_pmu_enable_bts(hwc->config);
        return;
    }

    if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
        intel_pmu_enable_fixed(hwc, idx);
        return;
    }

    __x86_pmu_enable_event(hwc, idx);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    int idx = hwc->idx;
    int ret;

    x86_perf_event_update(event, hwc, idx);
    ret = x86_perf_event_set_period(event, hwc, idx);

    return ret;
}

static void intel_pmu_reset(void)
{
    struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
    unsigned long flags;
    int idx;

    if (!x86_pmu.num_events)
        return;

    local_irq_save(flags);

    printk("clearing PMU state on CPU#%d\n", smp_processor_id());

    for (idx = 0; idx < x86_pmu.num_events; idx++) {
        checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
        checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
    }
    for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
        checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
    }
    if (ds)
        ds->bts_index = ds->bts_buffer_base;

    local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
    struct perf_sample_data data;
    struct cpu_hw_events *cpuc;
    int bit, loops;
    u64 ack, status;

    data.addr = 0;
    data.raw = NULL;

    cpuc = &__get_cpu_var(cpu_hw_events);

    perf_disable();
    intel_pmu_drain_bts_buffer();
    status = intel_pmu_get_status();
    if (!status) {
        perf_enable();
        return 0;
    }

    loops = 0;
again:
    if (++loops > 100) {
        WARN_ONCE(1, "perfevents: irq loop stuck!\n");
        perf_event_print_debug();
        intel_pmu_reset();
        perf_enable();
        return 1;
    }

    inc_irq_stat(apic_perf_irqs);
    ack = status;
    for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
        struct perf_event *event = cpuc->events[bit];

        clear_bit(bit, (unsigned long *) &status);
        if (!test_bit(bit, cpuc->active_mask))
            continue;

        if (!intel_pmu_save_and_restart(event))
            continue;

        data.period = event->hw.last_period;

        if (perf_event_overflow(event, 1, &data, regs))
            intel_pmu_disable_event(&event->hw, bit);
    }

    intel_pmu_ack_status(ack);

    /*
     * Repeat if there is more work to be done:
     */
    status = intel_pmu_get_status();
    if (status)
        goto again;

    perf_enable();

    return 1;
}

static struct event_constraint bts_constraint =
    EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);

static struct event_constraint *
intel_special_constraints(struct perf_event *event)
{
    unsigned int hw_event;

    hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;

    if (unlikely((hw_event ==
              x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
             (event->hw.sample_period == 1))) {

        return &bts_constraint;
    }
    return NULL;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
    struct event_constraint *c;

    c = intel_special_constraints(event);
    if (c)
        return c;

    return x86_get_event_constraints(cpuc, event);
}

static __initconst struct x86_pmu core_pmu = {
    .name = "core",
    .handle_irq = x86_pmu_handle_irq,
    .disable_all = x86_pmu_disable_all,
    .enable_all = x86_pmu_enable_all,
    .enable = x86_pmu_enable_event,
    .disable = x86_pmu_disable_event,
    .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
    .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
    .event_map = intel_pmu_event_map,
    .raw_event = intel_pmu_raw_event,
    .max_events = ARRAY_SIZE(intel_perfmon_event_map),
    .apic = 1,
    /*
     * Intel PMCs cannot be accessed sanely above 32 bit width,
     * so we install an artificial 1<<31 period regardless of
     * the generic event period:
     */
    .max_period = (1ULL << 31) - 1,
    .get_event_constraints = intel_get_event_constraints,
    .event_constraints = intel_core_event_constraints,
};

static __initconst struct x86_pmu intel_pmu = {
    .name = "Intel",
    .handle_irq = intel_pmu_handle_irq,
    .disable_all = intel_pmu_disable_all,
    .enable_all = intel_pmu_enable_all,
    .enable = intel_pmu_enable_event,
    .disable = intel_pmu_disable_event,
    .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
    .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
    .event_map = intel_pmu_event_map,
    .raw_event = intel_pmu_raw_event,
    .max_events = ARRAY_SIZE(intel_perfmon_event_map),
    .apic = 1,
    /*
     * Intel PMCs cannot be accessed sanely above 32 bit width,
     * so we install an artificial 1<<31 period regardless of
     * the generic event period:
     */
    .max_period = (1ULL << 31) - 1,
    .enable_bts = intel_pmu_enable_bts,
    .disable_bts = intel_pmu_disable_bts,
    .get_event_constraints = intel_get_event_constraints
};
static __init int intel_pmu_init(void)
{
    union cpuid10_edx edx;
    union cpuid10_eax eax;
    unsigned int unused;
    unsigned int ebx;
    int version;

    if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
        /* check for P6 processor family */
        if (boot_cpu_data.x86 == 6) {
            return p6_pmu_init();
        } else {
            return -ENODEV;
        }
    }

    /*
     * Check whether the Architectural PerfMon supports
     * Branch Misses Retired hw_event or not.
     */
    cpuid(10, &eax.full, &ebx, &unused, &edx.full);
    if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
        return -ENODEV;

    version = eax.split.version_id;
    if (version < 2)
        x86_pmu = core_pmu;
    else
        x86_pmu = intel_pmu;

    x86_pmu.version = version;
    x86_pmu.num_events = eax.split.num_events;
    x86_pmu.event_bits = eax.split.bit_width;
    x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;

    /*
     * Quirk: v2 perfmon does not report fixed-purpose events, so
     * assume at least 3 events:
     */
    if (version > 1)
        x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);

    /*
     * Install the hw-cache-events table:
     */
    switch (boot_cpu_data.x86_model) {
    case 14: /* 65 nm core solo/duo, "Yonah" */
        pr_cont("Core events, ");
        break;

    case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
    case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
    case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
    case 29: /* six-core 45 nm xeon "Dunnington" */
        memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        x86_pmu.event_constraints = intel_core2_event_constraints;
        pr_cont("Core2 events, ");
        break;

    case 26: /* 45 nm nehalem, "Bloomfield" */
    case 30: /* 45 nm nehalem, "Lynnfield" */
        memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        x86_pmu.event_constraints = intel_nehalem_event_constraints;
        pr_cont("Nehalem/Corei7 events, ");
        break;
    case 28:
        memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        x86_pmu.event_constraints = intel_gen_event_constraints;
        pr_cont("Atom events, ");
        break;

    case 37: /* 32 nm nehalem, "Clarkdale" */
    case 44: /* 32 nm nehalem, "Gulftown" */
        memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        x86_pmu.event_constraints = intel_westmere_event_constraints;
        pr_cont("Westmere events, ");
        break;
    default:
        /*
         * default constraints for v2 and up
         */
        x86_pmu.event_constraints = intel_gen_event_constraints;
        pr_cont("generic architected perfmon, ");
    }
    return 0;
}

#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
    return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */
arch/x86/kernel/cpu/perf_event_p6.c (new file, 157 lines)
@ -0,0 +1,157 @@
#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
};

static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}

/*
 * Event setting that is specified not to count anything.
 * We use this to effectively disable a counter.
 *
 * L2_RQSTS with 0 MESI unit mask.
 */
#define P6_NOP_EVENT			0x0000002EULL

static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_REG_MASK)

	return hw_event & P6_EVNTSEL_MASK;
}

static struct event_constraint p6_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x1),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};

static void p6_pmu_disable_all(void)
{
	u64 val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void p6_pmu_enable_all(void)
{
	unsigned long val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static inline void
p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val = P6_NOP_EVENT;

	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + idx, val);
}

static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	val = hwc->config;
	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + idx, val);
}

static __initconst struct x86_pmu p6_pmu = {
	.name			= "p6",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= p6_pmu_disable_all,
	.enable_all		= p6_pmu_enable_all,
	.enable			= p6_pmu_enable_event,
	.disable		= p6_pmu_disable_event,
	.eventsel		= MSR_P6_EVNTSEL0,
	.perfctr		= MSR_P6_PERFCTR0,
	.event_map		= p6_pmu_event_map,
	.raw_event		= p6_pmu_raw_event,
	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
	.apic			= 1,
	.max_period		= (1ULL << 31) - 1,
	.version		= 0,
	.num_events		= 2,
	/*
	 * Events have 40 bits implemented. However they are designed such
	 * that bits [32-39] are sign extensions of bit 31. As such the
	 * effective width of a event for P6-like PMU is 32 bits only.
	 *
	 * See IA-32 Intel Architecture Software developer manual Vol 3B
	 */
	.event_bits		= 32,
	.event_mask		= (1ULL << 32) - 1,
	.get_event_constraints	= x86_get_event_constraints,
	.event_constraints	= p6_event_constraints,
};
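[Editor's note] The event_bits comment above is why event_mask is only 32 bits wide: bits [32-39] merely mirror bit 31, so only the low 32 bits carry information. A sketch of taking a count delta under that mask (hypothetical helper, following the generic x86 counter-update logic):

	/* Hypothetical sketch: with bits [32-39] sign-extending bit 31,
	 * count deltas are taken modulo 2^32, i.e. under event_mask. */
	static u64 p6_count_delta(u64 prev_raw, u64 new_raw)
	{
		const u64 mask = (1ULL << 32) - 1;	/* == p6_pmu.event_mask */

		return (new_raw - prev_raw) & mask;
	}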

static __init int p6_pmu_init(void)
{
	switch (boot_cpu_data.x86_model) {
	case 1:
	case 3:  /* Pentium Pro */
	case 5:
	case 6:  /* Pentium II */
	case 7:
	case 8:
	case 11: /* Pentium III */
	case 9:
	case 13:
		/* Pentium M */
		break;
	default:
		pr_cont("unsupported p6 CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	x86_pmu = p6_pmu;

	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */
@ -115,17 +115,6 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)

	return !test_bit(counter, perfctr_nmi_owner);
}

/* checks the an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return !test_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);

int reserve_perfctr_nmi(unsigned int msr)
@ -18,11 +18,6 @@

#include "dumpstack.h"

/* Just a stub for now */
int x86_is_stack_id(int id, char *name)
{
	return 0;
}

void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,

@ -33,11 +33,6 @@ static char x86_stack_ids[][8] = {
#endif
};

int x86_is_stack_id(int id, char *name)
{
	return x86_stack_ids[id - 1] == name;
}

static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					 unsigned *usedp, char **idp)
{
@ -486,8 +486,6 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
	rcu_read_lock();

	bp = per_cpu(bp_per_reg[i], cpu);
	if (bp)
		rc = NOTIFY_DONE;
	/*
	 * Reset the 'i'th TRAP bit in dr6 to denote completion of
	 * exception handling
@ -506,7 +504,13 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)

		rcu_read_unlock();
	}
	if (dr6 & (~DR_TRAP_BITS))
	/*
	 * Further processing in do_debug() is needed for a) user-space
	 * breakpoints (to generate signals) and b) when the system has
	 * taken exception due to multiple causes
	 */
	if ((current->thread.debugreg6 & DR_TRAP_BITS) ||
	    (dr6 & (~DR_TRAP_BITS)))
		rc = NOTIFY_DONE;

	set_debugreg(dr7, 7);
@ -337,6 +337,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on special executable page on x86. */
@ -429,7 +432,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		reset_current_kprobe();
@ -140,30 +140,6 @@ static const int arg_offs_table[] = {
#endif
};

/**
 * regs_get_argument_nth() - get Nth argument at function call
 * @regs:	pt_regs which contains registers at function entry.
 * @n:		argument number.
 *
 * regs_get_argument_nth() returns @n th argument of a function call.
 * Since usually the kernel stack will be changed right after function entry,
 * you must use this at function entry. If the @n th entry is NOT in the
 * kernel stack or pt_regs, this returns 0.
 */
unsigned long regs_get_argument_nth(struct pt_regs *regs, unsigned int n)
{
	if (n < ARRAY_SIZE(arg_offs_table))
		return *(unsigned long *)((char *)regs + arg_offs_table[n]);
	else {
		/*
		 * The typical case: arg n is on the stack.
		 * (Note: stack[0] = return address, so skip it)
		 */
		n -= ARRAY_SIZE(arg_offs_table);
		return regs_get_kernel_stack_nth(regs, 1 + n);
	}
}

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
@ -534,6 +534,9 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		return;
@ -25,7 +25,7 @@
static __inline__ int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}
@ -33,7 +33,7 @@ static __inline__ int get_bitmask_order(unsigned int count)
static __inline__ int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
@ -45,6 +45,31 @@ static inline unsigned long hweight_long(unsigned long w)
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

/*
 * Clearly slow versions of the hweightN() functions, their benefit is
 * of course compile time evaluation of constant arguments.
 */
#define HWEIGHT8(w)					\
      (	BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) +	\
	(!!((w) & (1ULL << 0))) +			\
	(!!((w) & (1ULL << 1))) +			\
	(!!((w) & (1ULL << 2))) +			\
	(!!((w) & (1ULL << 3))) +			\
	(!!((w) & (1ULL << 4))) +			\
	(!!((w) & (1ULL << 5))) +			\
	(!!((w) & (1ULL << 6))) +			\
	(!!((w) & (1ULL << 7)))	)

#define HWEIGHT16(w) (HWEIGHT8(w)  + HWEIGHT8((w) >> 8))
#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))

/*
 * Type invariant version that simply casts things to the
 * largest type.
 */
#define HWEIGHT(w)   HWEIGHT64((u64)(w))
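[Editor's note] Because BUILD_BUG_ON_ZERO() rejects non-constant arguments, these macros are compile-time only; the payoff is that the whole expression folds to a constant. A usage sketch (hypothetical mask value, not from this diff):

	/* Hypothetical usage: HWEIGHT() folds to a constant, so it can
	 * size an array or seed a static initializer. 0xb3 has five
	 * bits set, so this declares an array of five slots. */
	#define MY_MASK 0xb3

	static int reg_slot[HWEIGHT(MY_MASK)];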

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
@ -134,6 +134,8 @@ extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);

extern int ftrace_text_reserved(void *start, void *end);

enum {
	FTRACE_FL_FREE		= (1 << 0),
	FTRACE_FL_FAILED	= (1 << 1),
@ -141,7 +143,6 @@ enum {
	FTRACE_FL_ENABLED	= (1 << 3),
	FTRACE_FL_NOTRACE	= (1 << 4),
	FTRACE_FL_CONVERTED	= (1 << 5),
	FTRACE_FL_FROZEN	= (1 << 6),
};

struct dyn_ftrace {
@ -250,6 +251,10 @@ static inline int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(void *start, void *end)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - can not re-enable after this */
@ -5,6 +5,7 @@
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

struct trace_array;
struct tracer;
@ -137,9 +138,6 @@ struct ftrace_event_call {

#define FTRACE_MAX_PROFILE_SIZE	2048

extern char *perf_trace_buf;
extern char *perf_trace_buf_nmi;

#define MAX_FILTER_PRED		32
#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */

@ -187,13 +185,27 @@ do {									\
		__trace_printk(ip, fmt, ##args);			\
} while (0)

#ifdef CONFIG_EVENT_PROFILE
#ifdef CONFIG_PERF_EVENTS
struct perf_event;
extern int ftrace_profile_enable(int event_id);
extern void ftrace_profile_disable(int event_id);
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *
ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp,
			unsigned long *irq_flags);

static inline void
ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		       u64 count, unsigned long irq_flags)
{
	struct trace_entry *entry = raw_data;

	perf_tp_event(entry->type, addr, count, raw_data, size);
	perf_swevent_put_recursion_context(rctx);
	local_irq_restore(irq_flags);
}
#endif

#endif /* _LINUX_FTRACE_EVENT_H */
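[Editor's note] The new ftrace_perf_buf_prepare()/ftrace_perf_buf_submit() pair factors out the irq-save, recursion-context and per-cpu buffer handling that each event handler previously open-coded, which is exactly what the later hunks in this series delete at every call site. A hypothetical caller sketch following the converted sites below (MY_EVENT_ID and struct my_entry are illustrative, not from this diff):

	static void my_event_handler(u64 addr)
	{
		struct my_entry *entry;
		unsigned long irq_flags;
		int rctx;
		int size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64))
			   - sizeof(u32);

		/* reserves per-cpu buffer space, disables irqs,
		 * takes a recursion context */
		entry = ftrace_perf_buf_prepare(size, MY_EVENT_ID,
						&rctx, &irq_flags);
		if (!entry)
			return;

		/* ... fill in the event-specific fields of *entry ... */

		/* emits the record and undoes everything prepare set up */
		ftrace_perf_buf_submit(entry, size, rctx, addr, 1, irq_flags);
	}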
@ -205,6 +205,20 @@ static inline int list_empty_careful(const struct list_head *head)
	return (next == head) && (next == head->prev);
}

/**
 * list_rotate_left - rotate the list to the left
 * @head: the head of the list
 */
static inline void list_rotate_left(struct list_head *head)
{
	struct list_head *first;

	if (!list_empty(head)) {
		first = head->next;
		list_move_tail(first, head);
	}
}

/**
 * list_is_singular - tests whether a list has just one entry.
 * @head: the list to test.
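[Editor's note] list_rotate_left(), added above, moves the first entry to the tail, which makes round-robin traversal a one-liner. A minimal sketch (hypothetical job queue, not from this diff):

	struct job {
		struct list_head node;
		int id;
	};

	/* Run the job at the head of the queue, then send it to the
	 * back so every queued job gets its turn. */
	static void run_one_round(struct list_head *queue)
	{
		struct job *j;

		if (list_empty(queue))
			return;

		j = list_first_entry(queue, struct job, node);
		/* ... run j ... */

		list_rotate_left(queue);	/* head entry becomes the tail */
	}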
@ -288,7 +288,7 @@ struct perf_event_mmap_page {
};

#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
@ -354,8 +354,8 @@ enum perf_event_type {
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE		= 5,
	PERF_RECORD_UNTHROTTLE		= 6,
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
@ -369,10 +369,10 @@ enum perf_event_type {

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ		= 8,
@ -410,7 +410,7 @@ enum perf_event_type {
	 *	char			data[size];}&& PERF_SAMPLE_RAW
	 * };
	 */
	PERF_RECORD_SAMPLE		= 9,
	PERF_RECORD_SAMPLE		= 9,

	PERF_RECORD_MAX,			/* non-ABI */
};
@ -476,9 +476,11 @@ struct hw_perf_event {
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		idx;
			int		last_cpu;
		};
		struct { /* software */
			s64		remaining;
@ -496,9 +498,8 @@ struct hw_perf_event {
	atomic64_t			period_left;
	u64				interrupts;

	u64				freq_count;
	u64				freq_interrupts;
	u64				freq_stamp;
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

@ -510,6 +511,8 @@ struct perf_event;
struct pmu {
	int (*enable)			(struct perf_event *event);
	void (*disable)			(struct perf_event *event);
	int (*start)			(struct perf_event *event);
	void (*stop)			(struct perf_event *event);
	void (*read)			(struct perf_event *event);
	void (*unthrottle)		(struct perf_event *event);
};
@ -563,6 +566,10 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
					 struct perf_sample_data *,
					 struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE = 0x1,
};

/**
 * struct perf_event - performance event kernel representation:
 */
@ -572,6 +579,7 @@ struct perf_event {
	struct list_head		event_entry;
	struct list_head		sibling_list;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct perf_event		*output;
	const struct pmu		*pmu;
@ -656,7 +664,7 @@ struct perf_event {

	perf_overflow_handler_t		overflow_handler;

#ifdef CONFIG_EVENT_PROFILE
#ifdef CONFIG_EVENT_TRACING
	struct event_filter		*filter;
#endif

@ -681,7 +689,8 @@ struct perf_event_context {
	 */
	struct mutex			mutex;

	struct list_head		group_list;
	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
@ -744,10 +753,9 @@ extern int perf_max_events;

extern const struct pmu *hw_perf_event_init(struct perf_event *event);

extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
extern void perf_event_task_sched_out(struct task_struct *task,
					struct task_struct *next, int cpu);
extern void perf_event_task_tick(struct task_struct *task, int cpu);
extern void perf_event_task_sched_in(struct task_struct *task);
extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern void perf_event_task_tick(struct task_struct *task);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
@ -762,7 +770,7 @@ extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_event *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx, int cpu);
	       struct perf_event_context *ctx);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
@ -851,8 +859,7 @@ extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern void perf_event_init(void);
extern void perf_tp_event(int event_id, u64 addr, u64 count,
				 void *record, int entry_size);
extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
@ -873,12 +880,12 @@ extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
perf_event_task_sched_in(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *task,
			    struct task_struct *next, int cpu)		{ }
			  struct task_struct *next)			{ }
static inline void
perf_event_task_tick(struct task_struct *task, int cpu)		{ }
perf_event_task_tick(struct task_struct *task)				{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
@ -893,13 +900,13 @@ static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
	      struct pt_regs *regs, u64 addr)				{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)		{ }
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline int perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
@ -99,7 +99,7 @@ struct perf_event_attr;
#define __SC_TEST5(t5, a5, ...)	__SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
#define __SC_TEST6(t6, a6, ...)	__SC_TEST(t6); __SC_TEST5(__VA_ARGS__)

#ifdef CONFIG_EVENT_PROFILE
#ifdef CONFIG_PERF_EVENTS

#define TRACE_SYS_ENTER_PROFILE_INIT(sname)				\
	.profile_enable = prof_sysenter_enable,				\
@ -113,7 +113,7 @@ struct perf_event_attr;
#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
#define TRACE_SYS_EXIT_PROFILE(sname)
#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
#endif
#endif /* CONFIG_PERF_EVENTS */

#ifdef CONFIG_FTRACE_SYSCALLS
#define __SC_STR_ADECL1(t, a)	#a
@ -20,14 +20,17 @@ TRACE_EVENT(lock_acquire,
	TP_STRUCT__entry(
		__field(unsigned int, flags)
		__string(name, lock->name)
		__field(void *, lockdep_addr)
	),

	TP_fast_assign(
		__entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
		__assign_str(name, lock->name);
		__entry->lockdep_addr = lock;
	),

	TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "",
	TP_printk("%p %s%s%s", __entry->lockdep_addr,
		  (__entry->flags & 1) ? "try " : "",
		  (__entry->flags & 2) ? "read " : "",
		  __get_str(name))
);
@ -40,13 +43,16 @@ TRACE_EVENT(lock_release,

	TP_STRUCT__entry(
		__string(name, lock->name)
		__field(void *, lockdep_addr)
	),

	TP_fast_assign(
		__assign_str(name, lock->name);
		__entry->lockdep_addr = lock;
	),

	TP_printk("%s", __get_str(name))
	TP_printk("%p %s",
		  __entry->lockdep_addr, __get_str(name))
);

#ifdef CONFIG_LOCK_STAT
@ -59,13 +65,16 @@ TRACE_EVENT(lock_contended,

	TP_STRUCT__entry(
		__string(name, lock->name)
		__field(void *, lockdep_addr)
	),

	TP_fast_assign(
		__assign_str(name, lock->name);
		__entry->lockdep_addr = lock;
	),

	TP_printk("%s", __get_str(name))
	TP_printk("%p %s",
		  __entry->lockdep_addr, __get_str(name))
);

TRACE_EVENT(lock_acquired,
@ -75,16 +84,18 @@ TRACE_EVENT(lock_acquired,

	TP_STRUCT__entry(
		__string(name, lock->name)
		__field(unsigned long, wait_usec)
		__field(unsigned long, wait_nsec_rem)
		__field(s64, wait_nsec)
		__field(void *, lockdep_addr)
	),

	TP_fast_assign(
		__assign_str(name, lock->name);
		__entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC);
		__entry->wait_usec = (unsigned long) waittime;
		__entry->wait_nsec = waittime;
		__entry->lockdep_addr = lock;
	),
	TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec,
		  __entry->wait_nsec_rem)
	TP_printk("%p %s (%llu ns)", __entry->lockdep_addr,
		  __get_str(name),
		  __entry->wait_nsec)
);

#endif
@ -376,7 +376,7 @@ static inline notrace int ftrace_get_offsets_##call(			\

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_EVENT_PROFILE
#ifdef CONFIG_PERF_EVENTS

/*
 * Generate the functions needed for tracepoint perf_event support.
@ -421,7 +421,7 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused)		\

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif
#endif /* CONFIG_PERF_EVENTS */

/*
 * Stage 4 of the trace events.
@ -505,7 +505,7 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused)		\
 *
 */

#ifdef CONFIG_EVENT_PROFILE
#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PROFILE_INIT(call)					\
	.profile_enable = ftrace_profile_enable_##call,			\
@ -513,7 +513,7 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused)		\

#else
#define _TRACE_PROFILE_INIT(call)
#endif
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry
@ -736,7 +736,7 @@ __attribute__((section("_ftrace_events"))) event_##call = {		\
 * }
 */

#ifdef CONFIG_EVENT_PROFILE
#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry
@ -761,22 +761,12 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
			    proto)					\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	extern int perf_swevent_get_recursion_context(void);		\
	extern void perf_swevent_put_recursion_context(int rctx);	\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
	char *trace_buf;						\
	char *raw_data;							\
	int __cpu;							\
	int rctx;							\
	int pc;								\
									\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
@ -786,42 +776,16 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	local_irq_save(irq_flags);					\
									\
	rctx = perf_swevent_get_recursion_context();			\
	if (rctx < 0)							\
		goto end_recursion;					\
									\
	__cpu = smp_processor_id();					\
									\
	if (in_nmi())							\
		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
	else								\
		trace_buf = rcu_dereference(perf_trace_buf);		\
									\
	if (!trace_buf)							\
		goto end;						\
									\
	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
									\
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
	entry = (struct ftrace_raw_##call *)raw_data;			\
	ent = &entry->ent;						\
	tracing_generic_entry_update(ent, irq_flags, pc);		\
	ent->type = event_call->id;					\
									\
	entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare(	\
		__entry_size, event_call->id, &rctx, &irq_flags);	\
	if (!entry)							\
		return;							\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
		       __entry_size);					\
									\
end:									\
	perf_swevent_put_recursion_context(rctx);			\
end_recursion:								\
	local_irq_restore(irq_flags);					\
	ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr,	\
			       __count, irq_flags);			\
}

#undef DEFINE_EVENT
@ -838,7 +802,7 @@ static notrace void ftrace_profile_##call(proto)			\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */
#endif /* CONFIG_PERF_EVENTS */

#undef _TRACE_PROFILE_INIT

@ -45,12 +45,12 @@ ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
#endif
#ifdef CONFIG_EVENT_PROFILE

#ifdef CONFIG_PERF_EVENTS
int prof_sysenter_enable(struct ftrace_event_call *call);
void prof_sysenter_disable(struct ftrace_event_call *call);
int prof_sysexit_enable(struct ftrace_event_call *call);
void prof_sysexit_disable(struct ftrace_event_call *call);

#endif

#endif /* _TRACE_SYSCALL_H */
init/Kconfig
@ -992,19 +992,6 @@ config PERF_EVENTS

	  Say Y if unsure.

config EVENT_PROFILE
	bool "Tracepoint profiling sources"
	depends on PERF_EVENTS && EVENT_TRACING
	default y
	help
	  Allow the use of tracepoints as software performance events.

	  When this is enabled, you can create perf events based on
	  tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID
	  found in debugfs://tracing/events/*/*/id. (The -e/--events
	  option to the perf tool can parse and interpret symbolic
	  tracepoints, in the subsystem:tracepoint_name format.)

config PERF_COUNTERS
	bool "Kernel performance counters (old config option)"
	depends on HAVE_PERF_EVENTS
@ -44,6 +44,7 @@
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
@ -125,30 +126,6 @@ static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n",p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
@ -236,9 +213,8 @@ static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is preepmted on the garbages */
	if (check_safety())
		return -EAGAIN;
	/* Ensure no-one is interrupted on the garbages */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
		int i;
@ -729,7 +705,8 @@ int __kprobes register_kprobe(struct kprobe *p)

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
	    in_kprobes_functions((unsigned long) p->addr) ||
	    ftrace_text_reserved(p->addr, p->addr)) {
		preempt_enable();
		return -EINVAL;
	}
(File diff suppressed because it is too large.)
@ -2799,7 +2799,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
	 */
	prev_state = prev->state;
	finish_arch_switch(prev);
	perf_event_task_sched_in(current, cpu_of(rq));
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_disable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
	perf_event_task_sched_in(current);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
	finish_lock_switch(rq, prev);

	fire_sched_in_preempt_notifiers(current);
@ -5314,7 +5320,7 @@ void scheduler_tick(void)
	curr->sched_class->task_tick(rq, curr, 0);
	raw_spin_unlock(&rq->lock);

	perf_event_task_tick(curr, cpu);
	perf_event_task_tick(curr);

#ifdef CONFIG_SMP
	rq->idle_at_tick = idle_cpu(cpu);
@ -5528,7 +5534,7 @@ need_resched_nonpreemptible:

	if (likely(prev != next)) {
		sched_info_switch(prev, next);
		perf_event_task_sched_out(prev, next, cpu);
		perf_event_task_sched_out(prev, next);

		rq->nr_switches++;
		rq->curr = next;
@ -51,7 +51,9 @@ endif
obj-$(CONFIG_EVENT_TRACING) += trace_events.o
obj-$(CONFIG_EVENT_TRACING) += trace_export.o
obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
ifeq ($(CONFIG_PERF_EVENTS),y)
obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
endif
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o
@ -22,7 +22,6 @@
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
@ -898,36 +897,6 @@ static struct dyn_ftrace *ftrace_free_records;
		}				\
	}

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
@ -1025,6 +994,21 @@ static void ftrace_bug(int failed, unsigned long ip)
	}
}


/* Return 1 if the address range is reserved for ftrace */
int ftrace_text_reserved(void *start, void *end)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip <= (unsigned long)end &&
		    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
			return 1;
	} while_for_each_ftrace_rec();
	return 0;
}


static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
@ -1076,14 +1060,6 @@ static void ftrace_replace_code(int enable)
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
@ -6,14 +6,12 @@
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"


char *perf_trace_buf;
EXPORT_SYMBOL_GPL(perf_trace_buf);

char *perf_trace_buf_nmi;
EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);
static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;

@ -120,3 +118,47 @@ void ftrace_profile_disable(int event_id)
	}
	mutex_unlock(&event_mutex);
}

__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
					int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;
err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);
@ -1371,7 +1371,7 @@ out_unlock:
	return err;
}

#ifdef CONFIG_EVENT_PROFILE
#ifdef CONFIG_PERF_EVENTS

void ftrace_profile_free_filter(struct perf_event *event)
{
@ -1439,5 +1439,5 @@ out_unlock:
	return err;
}

#endif /* CONFIG_EVENT_PROFILE */
#endif /* CONFIG_PERF_EVENTS */

@ -91,11 +91,6 @@ static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
	return retval;
}

static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
{
	return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
					      void *dummy)
{
@ -231,9 +226,7 @@ static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
{
	int ret = -EINVAL;

	if (ff->func == fetch_argument)
		ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_register) {
	if (ff->func == fetch_register) {
		const char *name;
		name = regs_query_register_name((unsigned int)((long)ff->data));
		ret = snprintf(buf, n, "%%%s", name);
@ -489,14 +482,6 @@ static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
		}
	} else
		ret = -EINVAL;
	} else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
		ret = strict_strtoul(arg + 3, 10, &param);
		if (ret || param > PARAM_MAX_ARGS)
			ret = -EINVAL;
		else {
			ff->func = fetch_argument;
			ff->data = (void *)param;
		}
	} else
		ret = -EINVAL;
	return ret;
@ -611,7 +596,6 @@ static int create_trace_probe(int argc, char **argv)
 *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
 *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
 * Fetch args:
 *  $argN	: fetch Nth of function argument. (N:0-)
 *  $retval	: fetch return value
 *  $stack	: fetch stack address
 *  $stackN	: fetch Nth of stack (N:0-)
@ -958,7 +942,7 @@ static const struct file_operations kprobe_profile_ops = {
};

/* Kprobe handler */
static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct kprobe_trace_entry *entry;
@ -978,7 +962,7 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;
		return;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
@ -988,11 +972,10 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
	return 0;
}

/* Kretprobe handler */
static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@ -1011,7 +994,7 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;
		return;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
@ -1022,8 +1005,6 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);

	return 0;
}

/* Event entry printers */
@ -1230,137 +1211,67 @@ static int set_print_fmt(struct trace_probe *tp)
	return 0;
}

#ifdef CONFIG_EVENT_PROFILE
#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
static __kprobes void kprobe_profile_func(struct kprobe *kp,
					 struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	int size, __size, i;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		     "profile buffer not large enough"))
		return 0;
		return;

	/*
	 * Protect the non nmi buffer
	 * This also protects the rcu read side
	 */
	local_irq_save(irq_flags);
	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
	if (!entry)
		return;

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);
}

/* Kretprobe profile handler */
static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
					    struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	int size, __size, i;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		     "profile buffer not large enough"))
		return 0;
		return;

	/*
	 * Protect the non nmi buffer
	 * This also protects the rcu read side
	 */
	local_irq_save(irq_flags);
	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
	if (!entry)
		return;

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kretprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
	ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags);
}

static int probe_profile_enable(struct ftrace_event_call *call)
@ -1388,7 +1299,7 @@ static void probe_profile_disable(struct ftrace_event_call *call)
		disable_kprobe(&tp->rp.kp);
	}
}
#endif	/* CONFIG_EVENT_PROFILE */
#endif	/* CONFIG_PERF_EVENTS */


static __kprobes
@ -1398,10 +1309,10 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(kp, regs);
#ifdef CONFIG_EVENT_PROFILE
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_profile_func(kp, regs);
#endif	/* CONFIG_EVENT_PROFILE */
#endif
	return 0;	/* We don't tweek kernel, so just return 0 */
}

@ -1412,10 +1323,10 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(ri, regs);
#ifdef CONFIG_EVENT_PROFILE
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_profile_func(ri, regs);
#endif	/* CONFIG_EVENT_PROFILE */
#endif
	return 0;	/* We don't tweek kernel, so just return 0 */
}

@ -1446,7 +1357,7 @@ static int register_probe_event(struct trace_probe *tp)
	call->regfunc = probe_event_enable;
	call->unregfunc = probe_event_disable;

#ifdef CONFIG_EVENT_PROFILE
#ifdef CONFIG_PERF_EVENTS
	call->profile_enable = probe_profile_enable;
	call->profile_disable = probe_profile_disable;
#endif
@ -1507,28 +1418,67 @@ static int kprobe_trace_selftest_target(int a1, int a2, int a3,

static __init int kprobe_trace_self_tests_init(void)
{
	int ret;
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_probe *tp;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
				  "$arg1 $arg2 $arg3 $arg4 $stack $stack0");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function entry\n");
				  "$stack $stack0 +0($stack)");
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warning("error on getting new probe.\n");
			warn++;
		} else
			probe_event_enable(&tp->call);
	}

	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function return\n");
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warning("error on getting new probe.\n");
			warn++;
		} else
			probe_event_enable(&tp->call);
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	cleanup_all_probes();
	ret = command_trace_probe("-:testprobe");
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on deleting a probe.\n");
		warn++;
	}

	pr_cont("OK\n");
	ret = command_trace_probe("-:testprobe2");
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on deleting a probe.\n");
		warn++;
	}

end:
	cleanup_all_probes();
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

@ -426,7 +426,7 @@ int __init init_ftrace_syscalls(void)
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_EVENT_PROFILE
#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
@ -438,12 +438,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	unsigned long flags;
	char *trace_buf;
	char *raw_data;
	int syscall_nr;
	int rctx;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
@ -462,37 +459,15 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
		      "profile buffer not large enough"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);
	rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
				sys_data->enter_event->id, &rctx, &flags);
	if (!rec)
		return;

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	cpu = smp_processor_id();

	trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_enter *) raw_data;
	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->enter_event->id;
	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			       (unsigned long *)&rec->args);
	perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(flags);
	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
}

int prof_sysenter_enable(struct ftrace_event_call *call)
@ -536,11 +511,8 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
	struct syscall_trace_exit *rec;
	unsigned long flags;
	int syscall_nr;
	char *trace_buf;
	char *raw_data;
	int rctx;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
@ -562,38 +534,15 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
		      "exit event has grown above profile buffer size"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);
	rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
				sys_data->exit_event->id, &rctx, &flags);
	if (!rec)
		return;

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	cpu = smp_processor_id();

	trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_exit *)raw_data;

	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->exit_event->id;
	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(flags);
	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
}

int prof_sysexit_enable(struct ftrace_event_call *call)
@ -631,6 +580,5 @@ void prof_sysexit_disable(struct ftrace_event_call *call)
	mutex_unlock(&syscall_trace_lock);
}

#endif
#endif /* CONFIG_PERF_EVENTS */

tools/perf/.gitignore (vendored)
@ -14,6 +14,7 @@ perf*.html
common-cmds.h
perf.data
perf.data.old
perf-archive
tags
TAGS
cscope*
tools/perf/Documentation/perf-archive.txt (new file, 22 lines)
@ -0,0 +1,22 @@
perf-archive(1)
===============

NAME
----
perf-archive - Create archive with object files with build-ids found in perf.data file

SYNOPSIS
--------
[verse]
'perf archive' [file]

DESCRIPTION
-----------
This command runs perf-buildid-list --with-hits, and collects the files
with the build-ids found so that analysis of perf.data contents can be done
on another machine.


SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-buildid-list[1], linkperf:perf-report[1]
33
tools/perf/Documentation/perf-buildid-cache.txt
Normal file
33
tools/perf/Documentation/perf-buildid-cache.txt
Normal file
@ -0,0 +1,33 @@
|
||||
perf-buildid-cache(1)
|
||||
=====================
|
||||
|
||||
NAME
|
||||
----
|
||||
perf-buildid-cache - Manage build-id cache.
|
||||
|
||||
SYNOPSIS
|
||||
--------
|
||||
[verse]
|
||||
'perf buildid-list <options>'
|
||||
|
||||
DESCRIPTION
|
||||
-----------
|
||||
This command manages the build-id cache. It can add files to and remove
|
||||
files from the cache. In the future it should also purge older entries, set
|
||||
upper limits for the space used by the cache, etc.
|
||||
|
||||
OPTIONS
|
||||
-------
|
||||
-a::
|
||||
--add=::
|
||||
Add specified file to the cache.
|
||||
-r::
|
||||
--remove=::
|
||||
Remove specified file from the cache.
|
||||
-v::
|
||||
--verbose::
|
||||
Be more verbose.
|
||||
|
||||
SEE ALSO
|
||||
--------
|
||||
linkperf:perf-record[1], linkperf:perf-report[1]
|
@ -15,6 +15,8 @@ or
|
||||
'perf probe' [options] --del='[GROUP:]EVENT' [...]
|
||||
or
|
||||
'perf probe' --list
|
||||
or
|
||||
'perf probe' --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]'
|
||||
|
||||
DESCRIPTION
|
||||
-----------
|
||||
@ -45,6 +47,11 @@ OPTIONS
|
||||
--list::
|
||||
List current probe events.
|
||||
|
||||
-L::
|
||||
--line=::
|
||||
Show source code lines which can be probed. This needs an argument
|
||||
which specifies a range of the source code.
|
||||
|
||||
PROBE SYNTAX
|
||||
------------
|
||||
Probe points are defined by the following syntax.
|
||||
@ -56,6 +63,19 @@ Probe points are defined by following syntax.
|
||||
It is also possible to specify a probe point by the source line number by using 'SRC:ALN' syntax, where 'SRC' is the source file path and 'ALN' is the line number.
|
||||
'ARG' specifies the arguments of this probe point. You can use the name of local variable, or kprobe-tracer argument format (e.g. $retval, %ax, etc).
|
||||
|
||||
LINE SYNTAX
|
||||
-----------
|
||||
A line range is described by the following syntax.
|
||||
|
||||
"FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]"
|
||||
|
||||
FUNC specifies the function whose lines are shown. 'RLN' is the start line
|
||||
number, counted from the function entry line, and 'RLN2' is the end line
|
||||
number. As with the probe syntax, 'SRC' means the source file path, 'ALN' is
|
||||
the start line number, and 'ALN2' is the end line number in the file. It is
|
||||
also possible to specify how many lines to show by using 'NUM'.
|
||||
So, "source.c:100-120" shows lines between 100th to l20th in source.c file. And "func:10+20" shows 20 lines from 10th line of func function.
|
||||
|
||||
SEE ALSO
|
||||
--------
|
||||
linkperf:perf-trace[1], linkperf:perf-record[1]
|
||||
|
@ -74,7 +74,7 @@ OPTIONS
|
||||
|
||||
-s <symbol>::
|
||||
--sym-annotate=<symbol>::
|
||||
Annotate this symbol. Requires -k option.
|
||||
Annotate this symbol.
|
||||
|
||||
-v::
|
||||
--verbose::
|
||||
|
@ -8,7 +8,7 @@ perf-trace-perl - Process trace data with a Perl script
|
||||
SYNOPSIS
|
||||
--------
|
||||
[verse]
|
||||
'perf trace' [-s [lang]:script[.ext] ]
|
||||
'perf trace' [-s [Perl]:script[.pl] ]
|
||||
|
||||
DESCRIPTION
|
||||
-----------
|
||||
|
625
tools/perf/Documentation/perf-trace-python.txt
Normal file
@ -0,0 +1,625 @@
|
||||
perf-trace-python(1)
|
||||
====================
|
||||
|
||||
NAME
|
||||
----
|
||||
perf-trace-python - Process trace data with a Python script
|
||||
|
||||
SYNOPSIS
|
||||
--------
|
||||
[verse]
|
||||
'perf trace' [-s [Python]:script[.py] ]
|
||||
|
||||
DESCRIPTION
|
||||
-----------
|
||||
|
||||
This perf trace option is used to process perf trace data using perf's
|
||||
built-in Python interpreter. It reads and processes the input file and
|
||||
displays the results of the trace analysis implemented in the given
|
||||
Python script, if any.
|
||||
|
||||
A QUICK EXAMPLE
|
||||
---------------
|
||||
|
||||
This section shows the process, start to finish, of creating a working
|
||||
Python script that aggregates and extracts useful information from a
|
||||
raw perf trace stream. You can avoid reading the rest of this
|
||||
document if an example is enough for you; the rest of the document
|
||||
provides more details on each step and lists the library functions
|
||||
available to script writers.
|
||||
|
||||
This example actually details the steps that were used to create the
|
||||
'syscall-counts' script you see when you list the available perf trace
|
||||
scripts via 'perf trace -l'. As such, this script also shows how to
|
||||
integrate your script into the list of general-purpose 'perf trace'
|
||||
scripts listed by that command.
|
||||
|
||||
The syscall-counts script is a simple script, but demonstrates all the
|
||||
basic ideas necessary to create a useful script. Here's an example
|
||||
of its output (syscall names are not yet supported; they will appear
|
||||
as numbers):
|
||||
|
||||
----
|
||||
syscall events:
|
||||
|
||||
event count
|
||||
---------------------------------------- -----------
|
||||
sys_write 455067
|
||||
sys_getdents 4072
|
||||
sys_close 3037
|
||||
sys_swapoff 1769
|
||||
sys_read 923
|
||||
sys_sched_setparam 826
|
||||
sys_open 331
|
||||
sys_newfstat 326
|
||||
sys_mmap 217
|
||||
sys_munmap 216
|
||||
sys_futex 141
|
||||
sys_select 102
|
||||
sys_poll 84
|
||||
sys_setitimer 12
|
||||
sys_writev 8
|
||||
15 8
|
||||
sys_lseek 7
|
||||
sys_rt_sigprocmask 6
|
||||
sys_wait4 3
|
||||
sys_ioctl 3
|
||||
sys_set_robust_list 1
|
||||
sys_exit 1
|
||||
56 1
|
||||
sys_access 1
|
||||
----
|
||||
|
||||
Basically our task is to keep a per-syscall tally that gets updated
|
||||
every time a system call occurs in the system. Our script will do
|
||||
that, but first we need to record the data that will be processed by
|
||||
that script. Theoretically, there are a couple of ways we could do
|
||||
that:
|
||||
|
||||
- we could enable every event under the tracing/events/syscalls
|
||||
directory, but this is over 600 syscalls, well beyond the number
|
||||
allowable by perf. These individual syscall events will however be
|
||||
useful if we want to later use the guidance we get from the
|
||||
general-purpose scripts to drill down and get more detail about
|
||||
individual syscalls of interest.
|
||||
|
||||
- we can enable the sys_enter and/or sys_exit syscalls found under
|
||||
tracing/events/raw_syscalls. These are called for all syscalls; the
|
||||
'id' field can be used to distinguish between individual syscall
|
||||
numbers.
|
||||
|
||||
For this script, we only need to know that a syscall was entered; we
|
||||
don't care how it exited, so we'll use 'perf record' to record only
|
||||
the sys_enter events:
|
||||
|
||||
----
|
||||
# perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
|
||||
|
||||
^C[ perf record: Woken up 1 times to write data ]
|
||||
[ perf record: Captured and wrote 56.545 MB perf.data (~2470503 samples) ]
|
||||
----
|
||||
|
||||
The options basically say to collect data for every syscall event
|
||||
system-wide and multiplex the per-cpu output into a single stream.
|
||||
That single stream will be recorded in a file in the current directory
|
||||
called perf.data.
|
||||
|
||||
Once we have a perf.data file containing our data, we can use the -g
|
||||
'perf trace' option to generate a Python script that will contain a
|
||||
callback handler for each event type found in the perf.data trace
|
||||
stream (for more details, see the STARTER SCRIPTS section).
|
||||
|
||||
----
|
||||
# perf trace -g python
|
||||
generated Python script: perf-trace.py
|
||||
|
||||
The output file, also created in the current directory, is named
|
||||
perf-trace.py. Here's the file in its entirety:
|
||||
|
||||
# perf trace event handlers, generated by perf trace -g python
|
||||
# Licensed under the terms of the GNU GPL License version 2
|
||||
|
||||
# The common_* event handler fields are the most useful fields common to
|
||||
# all events. They don't necessarily correspond to the 'common_*' fields
|
||||
# in the format files. Those fields not available as handler params can
|
||||
# be retrieved using Python functions of the form common_*(context).
|
||||
# See the perf-trace-python Documentation for the list of available functions.
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
|
||||
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
|
||||
|
||||
from perf_trace_context import *
|
||||
from Core import *
|
||||
|
||||
def trace_begin():
|
||||
print "in trace_begin"
|
||||
|
||||
def trace_end():
|
||||
print "in trace_end"
|
||||
|
||||
def raw_syscalls__sys_enter(event_name, context, common_cpu,
|
||||
common_secs, common_nsecs, common_pid, common_comm,
|
||||
id, args):
|
||||
print_header(event_name, common_cpu, common_secs, common_nsecs,
|
||||
common_pid, common_comm)
|
||||
|
||||
print "id=%d, args=%s\n" % \
|
||||
(id, args),
|
||||
|
||||
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
|
||||
common_pid, common_comm):
|
||||
print_header(event_name, common_cpu, common_secs, common_nsecs,
|
||||
common_pid, common_comm)
|
||||
|
||||
def print_header(event_name, cpu, secs, nsecs, pid, comm):
|
||||
print "%-20s %5u %05u.%09u %8u %-20s " % \
|
||||
(event_name, cpu, secs, nsecs, pid, comm),
|
||||
----
|
||||
|
||||
At the top is a comment block followed by some import statements and a
|
||||
path append which every perf trace script should include.
|
||||
|
||||
Following that are a couple of generated functions, trace_begin() and
|
||||
trace_end(), which are called at the beginning and the end of the
|
||||
script respectively (for more details, see the SCRIPT_LAYOUT section
|
||||
below).
|
||||
|
||||
Following those are the 'event handler' functions, generated one for
|
||||
every event in the 'perf record' output. The handler functions take
|
||||
the form subsystem__event_name, and contain named parameters, one for
|
||||
each field in the event; in this case, there's only one event,
|
||||
raw_syscalls__sys_enter(). (see the EVENT HANDLERS section below for
|
||||
more info on event handlers).
|
||||
|
||||
The final couple of functions are, like the begin and end functions,
|
||||
generated for every script. The first, trace_unhandled(), is called
|
||||
every time the script finds an event in the perf.data file that
|
||||
doesn't correspond to any event handler in the script. This could
|
||||
mean either that the record step recorded event types that it wasn't
|
||||
really interested in, or the script was run against a trace file that
|
||||
doesn't correspond to the script.
|
||||
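As a side note, trace_unhandled() needn't be a no-op; a script could, for instance, tally the events it doesn't explicitly handle. This is a hypothetical variation, not part of the generated skeleton:

----
unhandled = {}

def trace_unhandled(event_name, context, common_cpu, common_secs,
        common_nsecs, common_pid, common_comm):
        # Count each unhandled event type so totals can be reported
        # later, e.g. from trace_end().
        unhandled[event_name] = unhandled.get(event_name, 0) + 1
----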
|
||||
The script generated by the -g option simply prints a line for each
|
||||
event found in the trace stream i.e. it basically just dumps the event
|
||||
and its parameter values to stdout. The print_header() function is
|
||||
simply a utility function used for that purpose. Let's rename the
|
||||
script and run it to see the default output:
|
||||
|
||||
----
|
||||
# mv perf-trace.py syscall-counts.py
|
||||
# perf trace -s syscall-counts.py
|
||||
|
||||
raw_syscalls__sys_enter 1 00840.847582083 7506 perf id=1, args=
|
||||
raw_syscalls__sys_enter 1 00840.847595764 7506 perf id=1, args=
|
||||
raw_syscalls__sys_enter 1 00840.847620860 7506 perf id=1, args=
|
||||
raw_syscalls__sys_enter 1 00840.847710478 6533 npviewer.bin id=78, args=
|
||||
raw_syscalls__sys_enter 1 00840.847719204 6533 npviewer.bin id=142, args=
|
||||
raw_syscalls__sys_enter 1 00840.847755445 6533 npviewer.bin id=3, args=
|
||||
raw_syscalls__sys_enter 1 00840.847775601 6533 npviewer.bin id=3, args=
|
||||
raw_syscalls__sys_enter 1 00840.847781820 6533 npviewer.bin id=3, args=
|
||||
.
|
||||
.
|
||||
.
|
||||
----
|
||||
|
||||
Of course, for this script, we're not interested in printing every
|
||||
trace event, but rather aggregating it in a useful way. So we'll get
|
||||
rid of everything to do with printing as well as the trace_begin() and
|
||||
trace_unhandled() functions, which we won't be using. That leaves us
|
||||
with this minimalistic skeleton:
|
||||
|
||||
----
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
|
||||
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
|
||||
|
||||
from perf_trace_context import *
|
||||
from Core import *
|
||||
|
||||
def trace_end():
|
||||
print "in trace_end"
|
||||
|
||||
def raw_syscalls__sys_enter(event_name, context, common_cpu,
|
||||
common_secs, common_nsecs, common_pid, common_comm,
|
||||
id, args):
|
||||
----
|
||||
|
||||
In trace_end(), we'll simply print the results, but first we need to
|
||||
generate some results to print. To do that we need to have our
|
||||
sys_enter() handler do the necessary tallying until all events have
|
||||
been counted. A hash table indexed by syscall id is a good way to
|
||||
store that information; every time the sys_enter() handler is called,
|
||||
we simply increment a count associated with that hash entry indexed by
|
||||
that syscall id:
|
||||
|
||||
----
|
||||
syscalls = autodict()
|
||||
|
||||
try:
|
||||
syscalls[id] += 1
|
||||
except TypeError:
|
||||
syscalls[id] = 1
|
||||
----
|
||||
|
||||
The syscalls 'autodict' object is a special kind of Python dictionary
|
||||
(implemented in Core.py) that implements Perl's 'autovivifying' hashes
|
||||
in Python i.e. with autovivifying hashes, you can assign nested hash
|
||||
values without having to go to the trouble of creating intermediate
|
||||
levels if they don't exist, e.g. syscalls[comm][pid][id] = 1 will create
|
||||
the intermediate hash levels and finally assign the value 1 to the
|
||||
hash entry for 'id' (because the value being assigned isn't a hash
|
||||
object itself, the initial value is assigned in the TypeError
|
||||
exception handler; there may be a better way to do this in Python, but
|
||||
that's what works for now).
|
||||
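For the curious, here is a minimal sketch of how such an autovivifying dictionary might be implemented; this is illustrative only, and the actual Core.py implementation may differ:

----
import collections

class autodict(collections.defaultdict):
    # A dict whose missing keys spring into existence as nested
    # autodicts, so nested assignment needs no explicit setup.
    def __init__(self):
        collections.defaultdict.__init__(self, autodict)

d = autodict()
d['comm']['pid']['id'] = 1  # intermediate levels created on demand
----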
|
||||
Putting that code into the raw_syscalls__sys_enter() handler, we
|
||||
effectively end up with a single-level dictionary keyed on syscall id
|
||||
and having the counts we've tallied as values.
|
||||
|
||||
The print_syscall_totals() function iterates over the entries in the
|
||||
dictionary and displays a line for each entry containing the syscall
|
||||
name (the dictionary keys contain the syscall ids, which are passed to
|
||||
the Util function syscall_name(), which translates the raw syscall
|
||||
numbers to the corresponding syscall name strings). The output is
|
||||
displayed after all the events in the trace have been processed, by
|
||||
calling the print_syscall_totals() function from the trace_end()
|
||||
handler called at the end of script processing.
|
||||
|
||||
The final script producing the output shown above is shown in its
|
||||
entirety below (syscall_name() helper is not yet available, you can
|
||||
only deal with id's for now):
|
||||
|
||||
----
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
|
||||
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
|
||||
|
||||
from perf_trace_context import *
|
||||
from Core import *
|
||||
from Util import *
|
||||
|
||||
syscalls = autodict()
|
||||
|
||||
def trace_end():
|
||||
print_syscall_totals()
|
||||
|
||||
def raw_syscalls__sys_enter(event_name, context, common_cpu,
|
||||
common_secs, common_nsecs, common_pid, common_comm,
|
||||
id, args):
|
||||
try:
|
||||
syscalls[id] += 1
|
||||
except TypeError:
|
||||
syscalls[id] = 1
|
||||
|
||||
def print_syscall_totals():
|
||||
if for_comm is not None:
|
||||
print "\nsyscall events for %s:\n\n" % (for_comm),
|
||||
else:
|
||||
print "\nsyscall events:\n\n",
|
||||
|
||||
print "%-40s %10s\n" % ("event", "count"),
|
||||
print "%-40s %10s\n" % ("----------------------------------------", \
|
||||
"-----------"),
|
||||
|
||||
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
|
||||
reverse = True):
|
||||
print "%-40s %10d\n" % (syscall_name(id), val),
|
||||
----
|
||||
|
||||
The script can be run just as before:
|
||||
|
||||
# perf trace -s syscall-counts.py
|
||||
|
||||
So those are the essential steps in writing and running a script. The
|
||||
process can be generalized to any tracepoint or set of tracepoints
|
||||
you're interested in - basically find the tracepoint(s) you're
|
||||
interested in by looking at the list of available events shown by
|
||||
'perf list' and/or look in /sys/kernel/debug/tracing events for
|
||||
detailed event and field info, record the corresponding trace data
|
||||
using 'perf record', passing it the list of interesting events,
|
||||
generate a skeleton script using 'perf trace -g python' and modify the
|
||||
code to aggregate and display it for your particular needs.
|
||||
|
||||
After you've done that you may end up with a general-purpose script
|
||||
that you want to keep around and have available for future use. By
|
||||
writing a couple of very simple shell scripts and putting them in the
|
||||
right place, you can have your script listed alongside the other
|
||||
scripts listed by the 'perf trace -l' command e.g.:
|
||||
|
||||
----
|
||||
root@tropicana:~# perf trace -l
|
||||
List of available trace scripts:
|
||||
workqueue-stats workqueue stats (ins/exe/create/destroy)
|
||||
wakeup-latency system-wide min/max/avg wakeup latency
|
||||
rw-by-file <comm> r/w activity for a program, by file
|
||||
rw-by-pid system-wide r/w activity
|
||||
----
|
||||
|
||||
A nice side effect of doing this is that you also then capture the
|
||||
probably lengthy 'perf record' command needed to record the events for
|
||||
the script.
|
||||
|
||||
To have the script appear as a 'built-in' script, you write two simple
|
||||
scripts, one for recording and one for 'reporting'.
|
||||
|
||||
The 'record' script is a shell script with the same base name as your
|
||||
script, but with -record appended. The shell script should be put
|
||||
into the perf/scripts/python/bin directory in the kernel source tree.
|
||||
In that script, you write the 'perf record' command-line needed for
|
||||
your script:
|
||||
|
||||
----
|
||||
# cat kernel-source/tools/perf/scripts/python/bin/syscall-counts-record
|
||||
|
||||
#!/bin/bash
|
||||
perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter
|
||||
----
|
||||
|
||||
The 'report' script is also a shell script with the same base name as
|
||||
your script, but with -report appended. It should also be located in
|
||||
the perf/scripts/python/bin directory. In that script, you write the
|
||||
'perf trace -s' command-line needed for running your script:
|
||||
|
||||
----
|
||||
# cat kernel-source/tools/perf/scripts/python/bin/syscall-counts-report
|
||||
|
||||
#!/bin/bash
|
||||
# description: system-wide syscall counts
|
||||
perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts.py
|
||||
----
|
||||
|
||||
Note that the location of the Python script given in the shell script
|
||||
is in the libexec/perf-core/scripts/python directory - this is where
|
||||
the script will be copied by 'make install' when you install perf.
|
||||
For 'make install' to put your script there, your script needs
|
||||
to be located in the perf/scripts/python directory in the kernel
|
||||
source tree:
|
||||
|
||||
----
|
||||
# ls -al kernel-source/tools/perf/scripts/python
|
||||
|
||||
root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python
|
||||
total 32
|
||||
drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 .
|
||||
drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 ..
|
||||
drwxr-xr-x 2 trz trz 4096 2010-01-26 22:29 bin
|
||||
-rw-r--r-- 1 trz trz 2548 2010-01-26 22:29 check-perf-trace.py
|
||||
drwxr-xr-x 3 trz trz 4096 2010-01-26 22:49 Perf-Trace-Util
|
||||
-rw-r--r-- 1 trz trz 1462 2010-01-26 22:30 syscall-counts.py
|
||||
----
|
||||
|
||||
Once you've done that (don't forget to do a new 'make install',
|
||||
otherwise your script won't show up at run-time), 'perf trace -l'
|
||||
should show a new entry for your script:
|
||||
|
||||
----
|
||||
root@tropicana:~# perf trace -l
|
||||
List of available trace scripts:
|
||||
workqueue-stats workqueue stats (ins/exe/create/destroy)
|
||||
wakeup-latency system-wide min/max/avg wakeup latency
|
||||
rw-by-file <comm> r/w activity for a program, by file
|
||||
rw-by-pid system-wide r/w activity
|
||||
syscall-counts system-wide syscall counts
|
||||
----
|
||||
|
||||
You can now perform the record step via 'perf trace record':
|
||||
|
||||
# perf trace record syscall-counts
|
||||
|
||||
and display the output using 'perf trace report':
|
||||
|
||||
# perf trace report syscall-counts
|
||||
|
||||
STARTER SCRIPTS
|
||||
---------------
|
||||
|
||||
You can quickly get started writing a script for a particular set of
|
||||
trace data by generating a skeleton script using 'perf trace -g
|
||||
python' in the same directory as an existing perf.data trace file.
|
||||
That will generate a starter script containing a handler for each of
|
||||
the event types in the trace file; it simply prints every available
|
||||
field for each event in the trace file.
|
||||
|
||||
You can also look at the existing scripts in
|
||||
~/libexec/perf-core/scripts/python for typical examples showing how to
|
||||
do basic things like aggregate event data, print results, etc. Also,
|
||||
the check-perf-trace.py script, while not interesting for its results,
|
||||
attempts to exercise all of the main scripting features.
|
||||
|
||||
EVENT HANDLERS
|
||||
--------------
|
||||
|
||||
When perf trace is invoked using a trace script, a user-defined
|
||||
'handler function' is called for each event in the trace. If there's
|
||||
no handler function defined for a given event type, the event is
|
||||
ignored (or passed to a 'trace_unhandled' function, see below) and the
|
||||
next event is processed.
|
||||
|
||||
Most of the event's field values are passed as arguments to the
|
||||
handler function; some of the less common ones aren't - those are
|
||||
available as calls back into the perf executable (see below).
|
||||
|
||||
As an example, the following perf record command can be used to record
|
||||
all sched_wakeup events in the system:
|
||||
|
||||
# perf record -c 1 -f -a -M -R -e sched:sched_wakeup
|
||||
|
||||
Traces meant to be processed using a script should be recorded with
|
||||
the above options: -c 1 says to sample every event, -a to enable
|
||||
system-wide collection, -M to multiplex the output, and -R to collect
|
||||
raw samples.
|
||||
|
||||
The format file for the sched_wakeup event defines the following fields
|
||||
(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
|
||||
|
||||
----
|
||||
format:
|
||||
field:unsigned short common_type;
|
||||
field:unsigned char common_flags;
|
||||
field:unsigned char common_preempt_count;
|
||||
field:int common_pid;
|
||||
field:int common_lock_depth;
|
||||
|
||||
field:char comm[TASK_COMM_LEN];
|
||||
field:pid_t pid;
|
||||
field:int prio;
|
||||
field:int success;
|
||||
field:int target_cpu;
|
||||
----
|
||||
|
||||
The handler function for this event would be defined as:
|
||||
|
||||
----
|
||||
def sched__sched_wakeup(event_name, context, common_cpu, common_secs,
|
||||
common_nsecs, common_pid, common_comm,
|
||||
comm, pid, prio, success, target_cpu):
|
||||
pass
|
||||
----
|
||||
|
||||
The handler function takes the form subsystem__event_name.
|
||||
|
||||
The common_* arguments in the handler's argument list are the set of
|
||||
arguments passed to all event handlers; some of the fields correspond
|
||||
to the common_* fields in the format file, but some are synthesized,
|
||||
and some of the common_* fields aren't common enough to be passed
|
||||
to every event as arguments but are available as library functions.
|
||||
|
||||
Here's a brief description of each of the invariant event args:
|
||||
|
||||
event_name the name of the event as text
|
||||
context an opaque 'cookie' used in calls back into perf
|
||||
common_cpu the cpu the event occurred on
|
||||
common_secs the secs portion of the event timestamp
|
||||
common_nsecs the nsecs portion of the event timestamp
|
||||
common_pid the pid of the current task
|
||||
common_comm the name of the current process
|
||||
|
||||
All of the remaining fields in the event's format file have
|
||||
counterparts as handler function arguments of the same name, as can be
|
||||
seen in the example above.
|
||||
|
||||
The above provides the basics needed to directly access every field of
|
||||
every event in a trace, which covers 90% of what you need to know to
|
||||
write a useful trace script. The sections below cover the rest.
|
||||
|
||||
SCRIPT LAYOUT
|
||||
-------------
|
||||
|
||||
Every perf trace Python script should start by setting up a Python
|
||||
module search path and 'import'ing a few support modules (see module
|
||||
descriptions below):
|
||||
|
||||
----
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
|
||||
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
|
||||
|
||||
from perf_trace_context import *
|
||||
from Core import *
|
||||
----
|
||||
|
||||
The rest of the script can contain handler functions and support
|
||||
functions in any order.
|
||||
|
||||
Aside from the event handler functions discussed above, every script
|
||||
can implement a set of optional functions:
|
||||
|
||||
*trace_begin*, if defined, is called before any event is processed and
|
||||
gives scripts a chance to do setup tasks:
|
||||
|
||||
----
|
||||
def trace_begin():
|
||||
pass
|
||||
----
|
||||
|
||||
*trace_end*, if defined, is called after all events have been
|
||||
processed and gives scripts a chance to do end-of-script tasks, such
|
||||
as display results:
|
||||
|
||||
----
|
||||
def trace_end():
|
||||
pass
|
||||
----
|
||||
|
||||
*trace_unhandled*, if defined, is called for any event that
|
||||
doesn't have a handler explicitly defined for it. The standard set
|
||||
of common arguments is passed into it:
|
||||
|
||||
----
|
||||
def trace_unhandled(event_name, context, common_cpu, common_secs,
|
||||
common_nsecs, common_pid, common_comm):
|
||||
pass
|
||||
----
|
||||
|
||||
The remaining sections provide descriptions of each of the available
|
||||
built-in perf trace Python modules and their associated functions.
|
||||
|
||||
AVAILABLE MODULES AND FUNCTIONS
|
||||
-------------------------------
|
||||
|
||||
The following sections describe the functions and variables available
|
||||
via the various perf trace Python modules. To use the functions and
|
||||
variables from the given module, add the corresponding 'from XXXX
|
||||
import' line to your perf trace script.
|
||||
|
||||
Core.py Module
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
This module provides some essential functions to user scripts.
|
||||
|
||||
The *flag_str* and *symbol_str* functions provide human-readable
|
||||
strings for flag and symbolic fields. These correspond to the strings
|
||||
and values parsed from the 'print fmt' fields of the event format
|
||||
files:
|
||||
|
||||
flag_str(event_name, field_name, field_value) - returns the string representation corresponding to field_value for the flag field field_name of event event_name
|
||||
symbol_str(event_name, field_name, field_value) - returns the string representation corresponding to field_value for the symbolic field field_name of event event_name
|
||||
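As a hedged illustration, a handler for an event with a symbolic field could translate the raw value like this (the irq__softirq_entry event and its 'vec' field are used here as an assumed example):

----
def irq__softirq_entry(event_name, context, common_cpu, common_secs,
        common_nsecs, common_pid, common_comm, vec):
        # Translate the raw vector number via the event's 'print fmt' table.
        print "softirq vec=%s" % symbol_str("irq__softirq_entry", "vec", vec)
----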
|
||||
The *autodict* function returns a special kind of Python
|
||||
dictionary that implements Perl's 'autovivifying' hashes in Python
|
||||
i.e. with autovivifying hashes, you can assign nested hash values
|
||||
without having to go to the trouble of creating intermediate levels if
|
||||
they don't exist.
|
||||
|
||||
autodict() - returns an autovivifying dictionary instance
|
||||
|
||||
|
||||
perf_trace_context Module
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Some of the 'common' fields in the event format file aren't all that
|
||||
common, but need to be made accessible to user scripts nonetheless.
|
||||
|
||||
perf_trace_context defines a set of functions that can be used to
|
||||
access this data in the context of the current event. Each of these
|
||||
functions expects a context variable, which is the same as the
|
||||
context variable passed into every event handler as the second
|
||||
argument.
|
||||
|
||||
common_pc(context) - returns common_preempt_count for the current event
|
||||
common_flags(context) - returns common_flags for the current event
|
||||
common_lock_depth(context) - returns common_lock_depth for the current event
|
||||
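For example, a handler can hand its context cookie back to perf to retrieve these fields; this sketch reuses the sched_wakeup handler shown earlier:

----
def sched__sched_wakeup(event_name, context, common_cpu, common_secs,
        common_nsecs, common_pid, common_comm,
        comm, pid, prio, success, target_cpu):
        # The opaque context is passed back into perf to fetch the
        # less-common fields not present in the argument list.
        print "pc=%u flags=%x lock_depth=%d" % \
              (common_pc(context), common_flags(context),
               common_lock_depth(context))
----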
|
||||
Util.py Module
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Various utility functions for use with perf trace:
|
||||
|
||||
nsecs(secs, nsecs) - returns total nsecs given secs/nsecs pair
|
||||
nsecs_secs(nsecs) - returns whole secs portion given nsecs
|
||||
nsecs_nsecs(nsecs) - returns nsecs remainder given nsecs
|
||||
nsecs_str(nsecs) - returns printable string in the form secs.nsecs
|
||||
avg(total, n) - returns average given a sum and a total number of values
|
||||
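To make the semantics concrete, here is a minimal sketch of what these helpers compute; the shipped Util.py may differ in detail:

----
NSECS_PER_SEC = 1000000000

def nsecs(secs, nsecs):
        return secs * NSECS_PER_SEC + nsecs

def nsecs_secs(nsecs):
        return nsecs / NSECS_PER_SEC        # whole seconds

def nsecs_nsecs(nsecs):
        return nsecs % NSECS_PER_SEC        # nanosecond remainder

def nsecs_str(nsecs):
        return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))

def avg(total, n):
        return total / n
----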
|
||||
SEE ALSO
|
||||
--------
|
||||
linkperf:perf-trace[1]
|
@ -19,6 +19,11 @@ There are several variants of perf trace:
|
||||
'perf trace' to see a detailed trace of the workload that was
|
||||
recorded.
|
||||
|
||||
You can also run a set of pre-canned scripts that aggregate and
|
||||
summarize the raw trace data in various ways (the list of scripts is
|
||||
available via 'perf trace -l'). The following variants allow you to
|
||||
record and run those scripts:
|
||||
|
||||
'perf trace record <script>' to record the events required for 'perf
|
||||
trace report'. <script> is the name displayed in the output of
|
||||
'perf trace --list' i.e. the actual script name minus any language
|
||||
@ -31,6 +36,9 @@ There are several variants of perf trace:
|
||||
record <script>' is used and should be present for this command to
|
||||
succeed.
|
||||
|
||||
See the 'SEE ALSO' section for links to language-specific
|
||||
information on how to write and run your own trace scripts.
|
||||
|
||||
OPTIONS
|
||||
-------
|
||||
-D::
|
||||
@ -45,9 +53,11 @@ OPTIONS
|
||||
--list=::
|
||||
Display a list of available trace scripts.
|
||||
|
||||
-s::
|
||||
-s ['lang']::
|
||||
--script=::
|
||||
Process trace data with the given script ([lang]:script[.ext]).
|
||||
If the string 'lang' is specified in place of a script name, a
|
||||
list of supported languages will be displayed instead.
|
||||
|
||||
-g::
|
||||
--gen-script=::
|
||||
@ -56,4 +66,5 @@ OPTIONS
|
||||
|
||||
SEE ALSO
|
||||
--------
|
||||
linkperf:perf-record[1], linkperf:perf-trace-perl[1]
|
||||
linkperf:perf-record[1], linkperf:perf-trace-perl[1],
|
||||
linkperf:perf-trace-python[1]
|
||||
|
@ -12,7 +12,7 @@ SYNOPSIS
|
||||
|
||||
DESCRIPTION
|
||||
-----------
|
||||
Performance counters for Linux are are a new kernel-based subsystem
|
||||
Performance counters for Linux are a new kernel-based subsystem
|
||||
that provide a framework for all things performance analysis. It
|
||||
covers hardware level (CPU/PMU, Performance Monitoring Unit) features
|
||||
and software features (software counters, tracepoints) as well.
|
||||
|
@ -286,11 +286,7 @@ SCRIPT_PERL =
|
||||
SCRIPT_SH =
|
||||
TEST_PROGRAMS =
|
||||
|
||||
#
|
||||
# No scripts right now:
|
||||
#
|
||||
|
||||
# SCRIPT_SH += perf-am.sh
|
||||
SCRIPT_SH += perf-archive.sh
|
||||
|
||||
#
|
||||
# No Perl scripts right now:
|
||||
@ -315,9 +311,6 @@ PROGRAMS += perf
|
||||
# List built-in command $C whose implementation cmd_$C() is not in
|
||||
# builtin-$C.o but is linked in as part of some other command.
|
||||
#
|
||||
# None right now:
|
||||
#
|
||||
# BUILT_INS += perf-init $X
|
||||
|
||||
# what 'all' will build and 'install' will install, in perfexecdir
|
||||
ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
|
||||
@ -340,6 +333,7 @@ LIB_FILE=libperf.a
|
||||
LIB_H += ../../include/linux/perf_event.h
|
||||
LIB_H += ../../include/linux/rbtree.h
|
||||
LIB_H += ../../include/linux/list.h
|
||||
LIB_H += ../../include/linux/hash.h
|
||||
LIB_H += ../../include/linux/stringify.h
|
||||
LIB_H += util/include/linux/bitmap.h
|
||||
LIB_H += util/include/linux/bitops.h
|
||||
@ -363,12 +357,14 @@ LIB_H += util/include/asm/uaccess.h
|
||||
LIB_H += perf.h
|
||||
LIB_H += util/cache.h
|
||||
LIB_H += util/callchain.h
|
||||
LIB_H += util/build-id.h
|
||||
LIB_H += util/debug.h
|
||||
LIB_H += util/debugfs.h
|
||||
LIB_H += util/event.h
|
||||
LIB_H += util/exec_cmd.h
|
||||
LIB_H += util/types.h
|
||||
LIB_H += util/levenshtein.h
|
||||
LIB_H += util/map.h
|
||||
LIB_H += util/parse-options.h
|
||||
LIB_H += util/parse-events.h
|
||||
LIB_H += util/quote.h
|
||||
@ -389,12 +385,12 @@ LIB_H += util/sort.h
|
||||
LIB_H += util/hist.h
|
||||
LIB_H += util/thread.h
|
||||
LIB_H += util/trace-event.h
|
||||
LIB_H += util/trace-event-perl.h
|
||||
LIB_H += util/probe-finder.h
|
||||
LIB_H += util/probe-event.h
|
||||
|
||||
LIB_OBJS += util/abspath.o
|
||||
LIB_OBJS += util/alias.o
|
||||
LIB_OBJS += util/build-id.o
|
||||
LIB_OBJS += util/config.o
|
||||
LIB_OBJS += util/ctype.o
|
||||
LIB_OBJS += util/debugfs.o
|
||||
@ -431,12 +427,12 @@ LIB_OBJS += util/thread.o
|
||||
LIB_OBJS += util/trace-event-parse.o
|
||||
LIB_OBJS += util/trace-event-read.o
|
||||
LIB_OBJS += util/trace-event-info.o
|
||||
LIB_OBJS += util/trace-event-perl.o
|
||||
LIB_OBJS += util/trace-event-scripting.o
|
||||
LIB_OBJS += util/svghelper.o
|
||||
LIB_OBJS += util/sort.o
|
||||
LIB_OBJS += util/hist.o
|
||||
LIB_OBJS += util/data_map.o
|
||||
LIB_OBJS += util/probe-event.o
|
||||
LIB_OBJS += util/util.o
|
||||
|
||||
BUILTIN_OBJS += builtin-annotate.o
|
||||
|
||||
@ -451,6 +447,7 @@ BUILTIN_OBJS += builtin-diff.o
|
||||
BUILTIN_OBJS += builtin-help.o
|
||||
BUILTIN_OBJS += builtin-sched.o
|
||||
BUILTIN_OBJS += builtin-buildid-list.o
|
||||
BUILTIN_OBJS += builtin-buildid-cache.o
|
||||
BUILTIN_OBJS += builtin-list.o
|
||||
BUILTIN_OBJS += builtin-record.o
|
||||
BUILTIN_OBJS += builtin-report.o
|
||||
@ -460,6 +457,7 @@ BUILTIN_OBJS += builtin-top.o
|
||||
BUILTIN_OBJS += builtin-trace.o
|
||||
BUILTIN_OBJS += builtin-probe.o
|
||||
BUILTIN_OBJS += builtin-kmem.o
|
||||
BUILTIN_OBJS += builtin-lock.o
|
||||
|
||||
PERFLIBS = $(LIB_FILE)
|
||||
|
||||
@ -520,9 +518,23 @@ ifneq ($(shell sh -c "(echo '\#include <EXTERN.h>'; echo '\#include <perl.h>'; e
|
||||
BASIC_CFLAGS += -DNO_LIBPERL
|
||||
else
|
||||
ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
|
||||
LIB_OBJS += util/scripting-engines/trace-event-perl.o
|
||||
LIB_OBJS += scripts/perl/Perf-Trace-Util/Context.o
|
||||
endif
|
||||
|
||||
ifndef NO_LIBPYTHON
|
||||
PYTHON_EMBED_LDOPTS = `python-config --ldflags 2>/dev/null`
|
||||
PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null`
|
||||
endif
|
||||
|
||||
ifneq ($(shell sh -c "(echo '\#include <Python.h>'; echo 'int main(void) { Py_Initialize(); return 0; }') | $(CC) -x c - $(PYTHON_EMBED_CCOPTS) -o /dev/null $(PYTHON_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
|
||||
BASIC_CFLAGS += -DNO_LIBPYTHON
|
||||
else
|
||||
ALL_LDFLAGS += $(PYTHON_EMBED_LDOPTS)
|
||||
LIB_OBJS += util/scripting-engines/trace-event-python.o
|
||||
LIB_OBJS += scripts/python/Perf-Trace-Util/Context.o
|
||||
endif
|
||||
|
||||
ifdef NO_DEMANGLE
|
||||
BASIC_CFLAGS += -DNO_DEMANGLE
|
||||
else
|
||||
@ -894,12 +906,18 @@ util/hweight.o: ../../lib/hweight.c PERF-CFLAGS
|
||||
util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS
|
||||
$(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
|
||||
|
||||
util/trace-event-perl.o: util/trace-event-perl.c PERF-CFLAGS
|
||||
$(QUIET_CC)$(CC) -o util/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
|
||||
util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c PERF-CFLAGS
|
||||
$(QUIET_CC)$(CC) -o util/scripting-engines/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
|
||||
|
||||
scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c PERF-CFLAGS
|
||||
$(QUIET_CC)$(CC) -o scripts/perl/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
|
||||
|
||||
util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c PERF-CFLAGS
|
||||
$(QUIET_CC)$(CC) -o util/scripting-engines/trace-event-python.o -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
|
||||
|
||||
scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c PERF-CFLAGS
|
||||
$(QUIET_CC)$(CC) -o scripts/python/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
|
||||
|
||||
perf-%$X: %.o $(PERFLIBS)
|
||||
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
|
||||
|
||||
@ -1009,9 +1027,16 @@ install: all
|
||||
$(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)'
|
||||
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
|
||||
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
|
||||
$(INSTALL) perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
|
||||
$(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
|
||||
$(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'
|
||||
$(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
|
||||
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
|
||||
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
|
||||
$(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
|
||||
$(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'
|
||||
$(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
|
||||
|
||||
ifdef BUILT_INS
|
||||
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
|
||||
$(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
|
||||
|
@ -53,32 +53,20 @@ struct sym_priv {
|
||||
|
||||
static const char *sym_hist_filter;
|
||||
|
||||
static int symbol_filter(struct map *map __used, struct symbol *sym)
|
||||
static int sym__alloc_hist(struct symbol *self)
|
||||
{
|
||||
if (sym_hist_filter == NULL ||
|
||||
strcmp(sym->name, sym_hist_filter) == 0) {
|
||||
struct sym_priv *priv = symbol__priv(sym);
|
||||
const int size = (sizeof(*priv->hist) +
|
||||
(sym->end - sym->start) * sizeof(u64));
|
||||
struct sym_priv *priv = symbol__priv(self);
|
||||
const int size = (sizeof(*priv->hist) +
|
||||
(self->end - self->start) * sizeof(u64));
|
||||
|
||||
priv->hist = malloc(size);
|
||||
if (priv->hist)
|
||||
memset(priv->hist, 0, size);
|
||||
return 0;
|
||||
}
|
||||
/*
|
||||
* FIXME: We should really filter it out, as we don't want to go thru symbols
|
||||
* we're not interested, and if a DSO ends up with no symbols, delete it too,
|
||||
* but right now the kernel loading routines in symbol.c bail out if no symbols
|
||||
* are found, fix it later.
|
||||
*/
|
||||
return 0;
|
||||
priv->hist = zalloc(size);
|
||||
return priv->hist == NULL ? -1 : 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* collect histogram counts
|
||||
*/
|
||||
static void hist_hit(struct hist_entry *he, u64 ip)
|
||||
static int annotate__hist_hit(struct hist_entry *he, u64 ip)
|
||||
{
|
||||
unsigned int sym_size, offset;
|
||||
struct symbol *sym = he->sym;
|
||||
@ -88,83 +76,127 @@ static void hist_hit(struct hist_entry *he, u64 ip)
|
||||
he->count++;
|
||||
|
||||
if (!sym || !he->map)
|
||||
return;
|
||||
return 0;
|
||||
|
||||
priv = symbol__priv(sym);
|
||||
if (!priv->hist)
|
||||
return;
|
||||
if (priv->hist == NULL && sym__alloc_hist(sym) < 0)
|
||||
return -ENOMEM;
|
||||
|
||||
sym_size = sym->end - sym->start;
|
||||
offset = ip - sym->start;
|
||||
|
||||
if (verbose)
|
||||
fprintf(stderr, "%s: ip=%Lx\n", __func__,
|
||||
he->map->unmap_ip(he->map, ip));
|
||||
pr_debug3("%s: ip=%#Lx\n", __func__, he->map->unmap_ip(he->map, ip));
|
||||
|
||||
if (offset >= sym_size)
|
||||
return;
|
||||
return 0;
|
||||
|
||||
h = priv->hist;
|
||||
h->sum++;
|
||||
h->ip[offset]++;
|
||||
|
||||
if (verbose >= 3)
|
||||
printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n",
|
||||
(void *)(unsigned long)he->sym->start,
|
||||
he->sym->name,
|
||||
(void *)(unsigned long)ip, ip - he->sym->start,
|
||||
h->ip[offset]);
|
||||
pr_debug3("%#Lx %s: count++ [ip: %#Lx, %#Lx] => %Ld\n", he->sym->start,
|
||||
he->sym->name, ip, ip - he->sym->start, h->ip[offset]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_session__add_hist_entry(struct perf_session *self,
|
||||
struct addr_location *al, u64 count)
|
||||
{
|
||||
bool hit;
|
||||
struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL,
|
||||
count, &hit);
|
||||
struct hist_entry *he;
|
||||
|
||||
if (sym_hist_filter != NULL &&
|
||||
(al->sym == NULL || strcmp(sym_hist_filter, al->sym->name) != 0)) {
|
||||
/* We're only interested in a symbol named sym_hist_filter */
|
||||
if (al->sym != NULL) {
|
||||
rb_erase(&al->sym->rb_node,
|
||||
&al->map->dso->symbols[al->map->type]);
|
||||
symbol__delete(al->sym);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
he = __perf_session__add_hist_entry(self, al, NULL, count, &hit);
|
||||
if (he == NULL)
|
||||
return -ENOMEM;
|
||||
hist_hit(he, al->addr);
|
||||
return 0;
|
||||
|
||||
return annotate__hist_hit(he, al->addr);
|
||||
}
|
||||
|
||||
static int process_sample_event(event_t *event, struct perf_session *session)
|
||||
{
|
||||
struct addr_location al;
|
||||
|
||||
dump_printf("(IP, %d): %d: %p\n", event->header.misc,
|
||||
event->ip.pid, (void *)(long)event->ip.ip);
|
||||
dump_printf("(IP, %d): %d: %#Lx\n", event->header.misc,
|
||||
event->ip.pid, event->ip.ip);
|
||||
|
||||
if (event__preprocess_sample(event, session, &al, symbol_filter) < 0) {
|
||||
fprintf(stderr, "problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
if (event__preprocess_sample(event, session, &al, NULL) < 0) {
|
||||
pr_warning("problem processing %d event, skipping it.\n",
|
||||
event->header.type);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!al.filtered && perf_session__add_hist_entry(session, &al, 1)) {
|
||||
fprintf(stderr, "problem incrementing symbol count, "
|
||||
"skipping event\n");
|
||||
pr_warning("problem incrementing symbol count, "
|
||||
"skipping event\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int parse_line(FILE *file, struct hist_entry *he, u64 len)
|
||||
struct objdump_line {
|
||||
struct list_head node;
|
||||
s64 offset;
|
||||
char *line;
|
||||
};
|
||||
|
||||
static struct objdump_line *objdump_line__new(s64 offset, char *line)
|
||||
{
|
||||
struct objdump_line *self = malloc(sizeof(*self));
|
||||
|
||||
if (self != NULL) {
|
||||
self->offset = offset;
|
||||
self->line = line;
|
||||
}
|
||||
|
||||
return self;
|
||||
}
|
||||
|
||||
static void objdump_line__free(struct objdump_line *self)
|
||||
{
|
||||
free(self->line);
|
||||
free(self);
|
||||
}
|
||||
|
||||
static void objdump__add_line(struct list_head *head, struct objdump_line *line)
|
||||
{
|
||||
list_add_tail(&line->node, head);
|
||||
}
|
||||
|
||||
static struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
|
||||
struct objdump_line *pos)
|
||||
{
|
||||
list_for_each_entry_continue(pos, head, node)
|
||||
if (pos->offset >= 0)
|
||||
return pos;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int parse_line(FILE *file, struct hist_entry *he,
|
||||
struct list_head *head)
|
||||
{
|
||||
struct symbol *sym = he->sym;
|
||||
struct objdump_line *objdump_line;
|
||||
char *line = NULL, *tmp, *tmp2;
|
||||
static const char *prev_line;
|
||||
static const char *prev_color;
|
||||
unsigned int offset;
|
||||
size_t line_len;
|
||||
u64 start;
|
||||
s64 line_ip;
|
||||
int ret;
|
||||
s64 line_ip, offset = -1;
|
||||
char *c;
|
||||
|
||||
if (getline(&line, &line_len, file) < 0)
|
||||
return -1;
|
||||
|
||||
if (!line)
|
||||
return -1;
|
||||
|
||||
@ -173,8 +205,6 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len)
|
||||
*c = 0;
|
||||
|
||||
line_ip = -1;
|
||||
offset = 0;
|
||||
ret = -2;
|
||||
|
||||
/*
|
||||
* Strip leading spaces:
|
||||
@ -195,9 +225,30 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len)
|
||||
line_ip = -1;
|
||||
}
|
||||
|
||||
start = he->map->unmap_ip(he->map, sym->start);
|
||||
|
||||
if (line_ip != -1) {
|
||||
u64 start = map__rip_2objdump(he->map, sym->start);
|
||||
offset = line_ip - start;
|
||||
}
|
||||
|
||||
objdump_line = objdump_line__new(offset, line);
|
||||
if (objdump_line == NULL) {
|
||||
free(line);
|
||||
return -1;
|
||||
}
|
||||
objdump__add_line(head, objdump_line);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int objdump_line__print(struct objdump_line *self,
|
||||
struct list_head *head,
|
||||
struct hist_entry *he, u64 len)
|
||||
{
|
||||
struct symbol *sym = he->sym;
|
||||
static const char *prev_line;
|
||||
static const char *prev_color;
|
||||
|
||||
if (self->offset != -1) {
|
||||
const char *path = NULL;
|
||||
unsigned int hits = 0;
|
||||
double percent = 0.0;
|
||||
@ -205,15 +256,22 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len)
|
||||
struct sym_priv *priv = symbol__priv(sym);
|
||||
struct sym_ext *sym_ext = priv->ext;
|
||||
struct sym_hist *h = priv->hist;
|
||||
s64 offset = self->offset;
|
||||
struct objdump_line *next = objdump__get_next_ip_line(head, self);
|
||||
|
||||
offset = line_ip - start;
|
||||
if (offset < len)
|
||||
hits = h->ip[offset];
|
||||
while (offset < (s64)len &&
|
||||
(next == NULL || offset < next->offset)) {
|
||||
if (sym_ext) {
|
||||
if (path == NULL)
|
||||
path = sym_ext[offset].path;
|
||||
percent += sym_ext[offset].percent;
|
||||
} else
|
||||
hits += h->ip[offset];
|
||||
|
||||
if (offset < len && sym_ext) {
|
||||
path = sym_ext[offset].path;
|
||||
percent = sym_ext[offset].percent;
|
||||
} else if (h->sum)
|
||||
++offset;
|
||||
}
|
||||
|
||||
if (sym_ext == NULL && h->sum)
|
||||
percent = 100.0 * hits / h->sum;
|
||||
|
||||
color = get_percent_color(percent);
|
||||
@ -234,12 +292,12 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len)
|
||||
|
||||
color_fprintf(stdout, color, " %7.2f", percent);
|
||||
printf(" : ");
|
||||
color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", line);
|
||||
color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", self->line);
|
||||
} else {
|
||||
if (!*line)
|
||||
if (!*self->line)
|
||||
printf(" :\n");
|
||||
else
|
||||
printf(" : %s\n", line);
|
||||
printf(" : %s\n", self->line);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -365,6 +423,20 @@ static void print_summary(const char *filename)
|
||||
}
|
||||
}
|
||||
|
||||
static void hist_entry__print_hits(struct hist_entry *self)
|
||||
{
|
||||
struct symbol *sym = self->sym;
|
||||
struct sym_priv *priv = symbol__priv(sym);
|
||||
struct sym_hist *h = priv->hist;
|
||||
u64 len = sym->end - sym->start, offset;
|
||||
|
||||
for (offset = 0; offset < len; ++offset)
|
||||
if (h->ip[offset] != 0)
|
||||
printf("%*Lx: %Lu\n", BITS_PER_LONG / 2,
|
||||
sym->start + offset, h->ip[offset]);
|
||||
printf("%*s: %Lu\n", BITS_PER_LONG / 2, "h->sum", h->sum);
|
||||
}
|
||||
|
||||
static void annotate_sym(struct hist_entry *he)
|
||||
{
|
||||
struct map *map = he->map;
|
||||
@ -374,15 +446,15 @@ static void annotate_sym(struct hist_entry *he)
|
||||
u64 len;
|
||||
char command[PATH_MAX*2];
|
||||
FILE *file;
|
||||
LIST_HEAD(head);
|
||||
struct objdump_line *pos, *n;
|
||||
|
||||
if (!filename)
|
||||
return;
|
||||
|
||||
if (verbose)
|
||||
fprintf(stderr, "%s: filename=%s, sym=%s, start=%Lx, end=%Lx\n",
|
||||
__func__, filename, sym->name,
|
||||
map->unmap_ip(map, sym->start),
|
||||
map->unmap_ip(map, sym->end));
|
||||
pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
|
||||
filename, sym->name, map->unmap_ip(map, sym->start),
|
||||
map->unmap_ip(map, sym->end));
|
||||
|
||||
if (full_paths)
|
||||
d_filename = filename;
|
||||
@ -405,7 +477,8 @@ static void annotate_sym(struct hist_entry *he)
|
||||
dso, dso->long_name, sym, sym->name);
|
||||
|
||||
sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s",
|
||||
map->unmap_ip(map, sym->start), map->unmap_ip(map, sym->end),
|
||||
map__rip_2objdump(map, sym->start),
|
||||
map__rip_2objdump(map, sym->end),
|
||||
filename, filename);
|
||||
|
||||
if (verbose >= 3)
|
||||
@ -416,11 +489,21 @@ static void annotate_sym(struct hist_entry *he)
|
||||
return;
|
||||
|
||||
while (!feof(file)) {
|
||||
if (parse_line(file, he, len) < 0)
|
||||
if (parse_line(file, he, &head) < 0)
|
||||
break;
|
||||
}
|
||||
|
||||
pclose(file);
|
||||
|
||||
if (verbose)
|
||||
hist_entry__print_hits(he);
|
||||
|
||||
list_for_each_entry_safe(pos, n, &head, node) {
|
||||
objdump_line__print(pos, &head, he, len);
|
||||
list_del(&pos->node);
|
||||
objdump_line__free(pos);
|
||||
}
|
||||
|
||||
if (print_line)
|
||||
free_source_line(he, len);
|
||||
}
|
||||
@ -451,10 +534,10 @@ static void perf_session__find_annotations(struct perf_session *self)
|
||||
}
|
||||
|
||||
static struct perf_event_ops event_ops = {
|
||||
.process_sample_event = process_sample_event,
|
||||
.process_mmap_event = event__process_mmap,
|
||||
.process_comm_event = event__process_comm,
|
||||
.process_fork_event = event__process_task,
|
||||
.sample = process_sample_event,
|
||||
.mmap = event__process_mmap,
|
||||
.comm = event__process_comm,
|
||||
.fork = event__process_task,
|
||||
};
|
||||
|
||||
static int __cmd_annotate(void)
|
||||
@ -542,9 +625,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
|
||||
setup_pager();
|
||||
|
||||
if (field_sep && *field_sep == '.') {
|
||||
fputs("'.' is the only non valid --field-separator argument\n",
|
||||
stderr);
|
||||
exit(129);
|
||||
pr_err("'.' is the only non valid --field-separator argument\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return __cmd_annotate();
|
||||
|
133
tools/perf/builtin-buildid-cache.c
Normal file
@ -0,0 +1,133 @@
|
||||
/*
|
||||
* builtin-buildid-cache.c
|
||||
*
|
||||
* Builtin buildid-cache command: Manages build-id cache
|
||||
*
|
||||
* Copyright (C) 2010, Red Hat Inc.
|
||||
* Copyright (C) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
|
||||
*/
|
||||
#include "builtin.h"
|
||||
#include "perf.h"
|
||||
#include "util/cache.h"
|
||||
#include "util/debug.h"
|
||||
#include "util/header.h"
|
||||
#include "util/parse-options.h"
|
||||
#include "util/strlist.h"
|
||||
#include "util/symbol.h"
|
||||
|
||||
static char const *add_name_list_str, *remove_name_list_str;
|
||||
|
||||
static const char * const buildid_cache_usage[] = {
|
||||
"perf buildid-cache [<options>]",
|
||||
NULL
|
||||
};
|
||||
|
||||
static const struct option buildid_cache_options[] = {
|
||||
OPT_STRING('a', "add", &add_name_list_str,
|
||||
"file list", "file(s) to add"),
|
||||
OPT_STRING('r', "remove", &remove_name_list_str, "file list",
|
||||
"file(s) to remove"),
|
||||
OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"),
|
||||
OPT_END()
|
||||
};
|
||||
|
||||
static int build_id_cache__add_file(const char *filename, const char *debugdir)
|
||||
{
|
||||
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
|
||||
u8 build_id[BUILD_ID_SIZE];
|
||||
int err;
|
||||
|
||||
if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) {
|
||||
pr_debug("Couldn't read a build-id in %s\n", filename);
|
||||
return -1;
|
||||
}
|
||||
|
||||
build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
|
||||
err = build_id_cache__add_s(sbuild_id, debugdir, filename, false);
|
||||
if (verbose)
|
||||
pr_info("Adding %s %s: %s\n", sbuild_id, filename,
|
||||
err ? "FAIL" : "Ok");
|
||||
return err;
|
||||
}
|
||||
|
||||
static int build_id_cache__remove_file(const char *filename __used,
|
||||
const char *debugdir __used)
|
||||
{
|
||||
u8 build_id[BUILD_ID_SIZE];
|
||||
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
|
||||
|
||||
int err;
|
||||
|
||||
if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) {
|
||||
pr_debug("Couldn't read a build-id in %s\n", filename);
|
||||
return -1;
|
||||
}
|
||||
|
||||
build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
|
||||
err = build_id_cache__remove_s(sbuild_id, debugdir);
|
||||
if (verbose)
|
||||
pr_info("Removing %s %s: %s\n", sbuild_id, filename,
|
||||
err ? "FAIL" : "Ok");
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int __cmd_buildid_cache(void)
|
||||
{
|
||||
struct strlist *list;
|
||||
struct str_node *pos;
|
||||
char debugdir[PATH_MAX];
|
||||
|
||||
snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"),
|
||||
DEBUG_CACHE_DIR);
|
||||
|
||||
if (add_name_list_str) {
|
||||
list = strlist__new(true, add_name_list_str);
|
||||
if (list) {
|
||||
strlist__for_each(pos, list)
|
||||
if (build_id_cache__add_file(pos->s, debugdir)) {
|
||||
if (errno == EEXIST) {
|
||||
pr_debug("%s already in the cache\n",
|
||||
pos->s);
|
||||
continue;
|
||||
}
|
||||
pr_warning("Couldn't add %s: %s\n",
|
||||
pos->s, strerror(errno));
|
||||
}
|
||||
|
||||
strlist__delete(list);
|
||||
}
|
||||
}
|
||||
|
||||
if (remove_name_list_str) {
|
||||
list = strlist__new(true, remove_name_list_str);
|
||||
if (list) {
|
||||
strlist__for_each(pos, list)
|
||||
if (build_id_cache__remove_file(pos->s, debugdir)) {
|
||||
if (errno == ENOENT) {
|
||||
pr_debug("%s wasn't in the cache\n",
|
||||
pos->s);
|
||||
continue;
|
||||
}
|
||||
pr_warning("Couldn't remove %s: %s\n",
|
||||
pos->s, strerror(errno));
|
||||
}
|
||||
|
||||
strlist__delete(list);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int cmd_buildid_cache(int argc, const char **argv, const char *prefix __used)
|
||||
{
|
||||
argc = parse_options(argc, argv, buildid_cache_options,
|
||||
buildid_cache_usage, 0);
|
||||
|
||||
if (symbol__init() < 0)
|
||||
return -1;
|
||||
|
||||
setup_pager();
|
||||
return __cmd_buildid_cache();
|
||||
}
|
@ -8,6 +8,7 @@
|
||||
*/
|
||||
#include "builtin.h"
|
||||
#include "perf.h"
|
||||
#include "util/build-id.h"
|
||||
#include "util/cache.h"
|
||||
#include "util/debug.h"
|
||||
#include "util/parse-options.h"
|
||||
@ -16,6 +17,7 @@
|
||||
|
||||
static char const *input_name = "perf.data";
|
||||
static int force;
|
||||
static bool with_hits;
|
||||
|
||||
static const char * const buildid_list_usage[] = {
|
||||
"perf buildid-list [<options>]",
|
||||
@ -23,6 +25,7 @@ static const char * const buildid_list_usage[] = {
|
||||
};
|
||||
|
||||
static const struct option options[] = {
|
||||
OPT_BOOLEAN('H', "with-hits", &with_hits, "Show only DSOs with hits"),
|
||||
OPT_STRING('i', "input", &input_name, "file",
|
||||
"input file name"),
|
||||
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
|
||||
@ -31,26 +34,6 @@ static const struct option options[] = {
|
||||
OPT_END()
|
||||
};
|
||||
|
||||
static int perf_file_section__process_buildids(struct perf_file_section *self,
|
||||
int feat, int fd)
|
||||
{
|
||||
if (feat != HEADER_BUILD_ID)
|
||||
return 0;
|
||||
|
||||
if (lseek(fd, self->offset, SEEK_SET) < 0) {
|
||||
pr_warning("Failed to lseek to %Ld offset for buildids!\n",
|
||||
self->offset);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (perf_header__read_build_ids(fd, self->offset, self->size)) {
|
||||
pr_warning("Failed to read buildids!\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __cmd_buildid_list(void)
|
||||
{
|
||||
int err = -1;
|
||||
@ -60,10 +43,10 @@ static int __cmd_buildid_list(void)
|
||||
if (session == NULL)
|
||||
return -1;
|
||||
|
||||
err = perf_header__process_sections(&session->header, session->fd,
|
||||
perf_file_section__process_buildids);
|
||||
if (err >= 0)
|
||||
dsos__fprintf_buildid(stdout);
|
||||
if (with_hits)
|
||||
perf_session__process_events(session, &build_id__mark_dso_hit_ops);
|
||||
|
||||
dsos__fprintf_buildid(stdout, with_hits);
|
||||
|
||||
perf_session__delete(session);
|
||||
return err;
|
||||
|
@ -42,8 +42,8 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
	struct addr_location al;
	struct sample_data data = { .period = 1, };

	dump_printf("(IP, %d): %d: %p\n", event->header.misc,
		    event->ip.pid, (void *)(long)event->ip.ip);
	dump_printf("(IP, %d): %d: %#Lx\n", event->header.misc,
		    event->ip.pid, event->ip.ip);

	if (event__preprocess_sample(event, session, &al, NULL) < 0) {
		pr_warning("problem processing %d event, skipping it.\n",
@ -51,12 +51,12 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
		return -1;
	}

	if (al.filtered)
	if (al.filtered || al.sym == NULL)
		return 0;

	event__parse_sample(event, session->sample_type, &data);

	if (al.sym && perf_session__add_hist_entry(session, &al, data.period)) {
	if (perf_session__add_hist_entry(session, &al, data.period)) {
		pr_warning("problem incrementing symbol count, skipping event\n");
		return -1;
	}
@ -66,12 +66,12 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
}

static struct perf_event_ops event_ops = {
	.process_sample_event = diff__process_sample_event,
	.process_mmap_event = event__process_mmap,
	.process_comm_event = event__process_comm,
	.process_exit_event = event__process_task,
	.process_fork_event = event__process_task,
	.process_lost_event = event__process_lost,
	.sample	= diff__process_sample_event,
	.mmap	= event__process_mmap,
	.comm	= event__process_comm,
	.exit	= event__process_task,
	.fork	= event__process_task,
	.lost	= event__process_lost,
};

static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
@ -82,29 +82,19 @@ static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
	struct hist_entry *iter;

	while (*p != NULL) {
		int cmp;
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = strcmp(he->map->dso->name, iter->map->dso->name);
		if (cmp > 0)
		if (hist_entry__cmp(he, iter) < 0)
			p = &(*p)->rb_left;
		else if (cmp < 0)
		else
			p = &(*p)->rb_right;
		else {
			cmp = strcmp(he->sym->name, iter->sym->name);
			if (cmp > 0)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);
}

static void perf_session__resort_by_name(struct perf_session *self)
static void perf_session__resort_hist_entries(struct perf_session *self)
{
	unsigned long position = 1;
	struct rb_root tmp = RB_ROOT;
@ -122,29 +112,28 @@ static void perf_session__resort_by_name(struct perf_session *self)
	self->hists = tmp;
}

static void perf_session__set_hist_entries_positions(struct perf_session *self)
{
	perf_session__output_resort(self, self->events_stats.total);
	perf_session__resort_hist_entries(self);
}

static struct hist_entry *
perf_session__find_hist_entry_by_name(struct perf_session *self,
				      struct hist_entry *he)
perf_session__find_hist_entry(struct perf_session *self,
			      struct hist_entry *he)
{
	struct rb_node *n = self->hists.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node);
		int cmp = strcmp(he->map->dso->name, iter->map->dso->name);
		int64_t cmp = hist_entry__cmp(he, iter);

		if (cmp > 0)
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp < 0)
		else if (cmp > 0)
			n = n->rb_right;
		else {
			cmp = strcmp(he->sym->name, iter->sym->name);
			if (cmp > 0)
				n = n->rb_left;
			else if (cmp < 0)
				n = n->rb_right;
			else
				return iter;
		}
		else
			return iter;
	}

	return NULL;
@ -155,11 +144,9 @@ static void perf_session__match_hists(struct perf_session *old_session,
{
	struct rb_node *nd;

	perf_session__resort_by_name(old_session);

	for (nd = rb_first(&new_session->hists); nd; nd = rb_next(nd)) {
		struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node);
		pos->pair = perf_session__find_hist_entry_by_name(old_session, pos);
		pos->pair = perf_session__find_hist_entry(old_session, pos);
	}
}

@ -177,9 +164,12 @@ static int __cmd_diff(void)
		ret = perf_session__process_events(session[i], &event_ops);
		if (ret)
			goto out_delete;
		perf_session__output_resort(session[i], session[i]->events_stats.total);
	}

	perf_session__output_resort(session[1], session[1]->events_stats.total);
	if (show_displacement)
		perf_session__set_hist_entries_positions(session[0]);

	perf_session__match_hists(session[0], session[1]);
	perf_session__fprintf_hists(session[1], session[0],
				    show_displacement, stdout);
@ -204,7 +194,7 @@ static const struct option options[] = {
	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
	OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
	OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths,
	OPT_BOOLEAN('P', "full-paths", &symbol_conf.full_paths,
		    "Don't shorten the pathnames taking into account the cwd"),
	OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
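The rb-tree hunks above replace the old two-level ordering (dso name, then symbol name) with a single hist_entry__cmp() comparator, so perf_session__insert_hist_entry_by_name() and perf_session__find_hist_entry() now agree on one total order by construction. A standalone sketch of that search shape, with a hypothetical node type and comparator standing in for the perf structures:

	#include <stddef.h>
	#include <stdint.h>

	struct node {
		struct node *left, *right;
		int64_t key;			/* stand-in for a hist_entry */
	};

	/* One comparator shared by insert and lookup keeps the order consistent. */
	static int64_t cmp(const struct node *a, const struct node *b)
	{
		return a->key - b->key;
	}

	static struct node *tree_find(struct node *n, const struct node *he)
	{
		while (n) {
			int64_t c = cmp(he, n);

			if (c < 0)
				n = n->left;
			else if (c > 0)
				n = n->right;
			else
				return n;	/* match: pair the entries */
		}
		return NULL;
	}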
@ -286,8 +286,7 @@ void list_common_cmds_help(void)

	puts(" The most commonly used perf commands are:");
	for (i = 0; i < ARRAY_SIZE(common_cmds); i++) {
		printf(" %s ", common_cmds[i].name);
		mput_char(' ', longest - strlen(common_cmds[i].name));
		printf(" %-*s ", longest, common_cmds[i].name);
		puts(common_cmds[i].help);
	}
}
@ -314,8 +313,6 @@ static const char *cmd_to_page(const char *perf_cmd)
		return "perf";
	else if (!prefixcmp(perf_cmd, "perf"))
		return perf_cmd;
	else if (is_perf_command(perf_cmd))
		return prepend("perf-", perf_cmd);
	else
		return prepend("perf-", perf_cmd);
}
@ -92,23 +92,18 @@ static void setup_cpunode_map(void)
	if (!dir1)
		return;

	while (true) {
		dent1 = readdir(dir1);
		if (!dent1)
			break;

		if (sscanf(dent1->d_name, "node%u", &mem) < 1)
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR ||
		    sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while (true) {
			dent2 = readdir(dir2);
			if (!dent2)
				break;
			if (sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK ||
			    sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
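The rewritten loops fold readdir() into the while condition and filter on d_type before running sscanf(), skipping non-directories and non-symlinks early. The same pattern as a standalone program (note that d_type can be DT_UNKNOWN on filesystems that don't fill it in, in which case a stat() fallback would be needed):

	#define _DEFAULT_SOURCE		/* for d_type on glibc */
	#include <dirent.h>
	#include <stdio.h>

	int main(void)
	{
		DIR *dir = opendir("/sys/devices/system/node");
		struct dirent *dent;
		unsigned int node;

		if (!dir)
			return 1;
		while ((dent = readdir(dir)) != NULL) {
			/* d_type may be DT_UNKNOWN on some filesystems;
			 * a robust version would fall back to stat(). */
			if (dent->d_type != DT_DIR ||
			    sscanf(dent->d_name, "node%u", &node) < 1)
				continue;
			printf("found node%u\n", node);
		}
		closedir(dir);
		return 0;
	}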
@ -321,11 +316,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)

	event__parse_sample(event, session->sample_type, &data);

	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
		    event->header.misc,
		    data.pid, data.tid,
		    (void *)(long)data.ip,
		    (long long)data.period);
	dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
		    data.pid, data.tid, data.ip, data.period);

	thread = perf_session__findnew(session, event->ip.pid);
	if (thread == NULL) {
@ -342,22 +334,9 @@ static int process_sample_event(event_t *event, struct perf_session *session)
	return 0;
}

static int sample_type_check(struct perf_session *session)
{
	if (!(session->sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr,
			"No trace sample to read. Did you call perf record "
			"without -R?");
		return -1;
	}

	return 0;
}

static struct perf_event_ops event_ops = {
	.process_sample_event = process_sample_event,
	.process_comm_event = event__process_comm,
	.sample_type_check = sample_type_check,
	.sample	= process_sample_event,
	.comm	= event__process_comm,
};

static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@ -390,7 +369,7 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
	if (is_caller) {
		addr = data->call_site;
		if (!raw_ip)
			sym = map_groups__find_function(&session->kmaps, session, addr, NULL);
			sym = map_groups__find_function(&session->kmaps, addr, NULL);
	} else
		addr = data->ptr;

@ -504,11 +483,14 @@ static void sort_result(void)

static int __cmd_kmem(void)
{
	int err;
	int err = -EINVAL;
	struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
	if (session == NULL)
		return -ENOMEM;

	if (!perf_session__has_traces(session, "kmem record"))
		goto out_delete;

	setup_pager();
	err = perf_session__process_events(session, &event_ops);
	if (err != 0)
678	tools/perf/builtin-lock.c	Normal file
@ -0,0 +1,678 @@
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/session.h"

#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <limits.h>

#include <linux/list.h>
#include <linux/hash.h>

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS		12
#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)

static struct list_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))

#define LOCK_STATE_UNLOCKED	0 /* initial state */
#define LOCK_STATE_LOCKED	1

struct lock_stat {
	struct list_head	hash_entry;
	struct rb_node		rb;	/* used for sorting */

	/*
	 * FIXME: raw_field_value() returns unsigned long long,
	 * so the address of a lockdep_map has to be handled as 64-bit.
	 * Is there a better solution?
	 */
	void			*addr;	/* address of lockdep_map, used as ID */
	char			*name;	/* for strcpy(), we cannot use const */

	int			state;
	u64			prev_event_time; /* timestamp of previous event */

	unsigned int		nr_acquired;
	unsigned int		nr_acquire;
	unsigned int		nr_contended;
	unsigned int		nr_release;

	/* these times are in nano sec. */
	u64			wait_time_total;
	u64			wait_time_min;
	u64			wait_time_max;
};

/* build a simple key function that returns whether one is bigger than two */
#define SINGLE_KEY(member)						\
	static int lock_stat_key_ ## member(struct lock_stat *one,	\
					    struct lock_stat *two)	\
	{								\
		return one->member > two->member;			\
	}

SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_min)
SINGLE_KEY(wait_time_max)

struct lock_key {
	/*
	 * name: the value the user specifies;
	 * it should be simpler than the raw member name,
	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
	 */
	const char		*name;
	int			(*key)(struct lock_stat*, struct lock_stat*);
};

static const char		*sort_key = "acquired";

static int			(*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root		result;	/* place to store sorted data */

#define DEF_KEY_LOCK(name, fn_suffix)	\
	{ #name, lock_stat_key_ ## fn_suffix }
struct lock_key keys[] = {
	DEF_KEY_LOCK(acquired, nr_acquired),
	DEF_KEY_LOCK(contended, nr_contended),
	DEF_KEY_LOCK(wait_total, wait_time_total),
	DEF_KEY_LOCK(wait_min, wait_time_min),
	DEF_KEY_LOCK(wait_max, wait_time_max),

	/* more complicated extra comparisons should go here */

	{ NULL, NULL }
};

static void select_key(void)
{
	int i;

	for (i = 0; keys[i].name; i++) {
		if (!strcmp(keys[i].name, sort_key)) {
			compare = keys[i].key;
			return;
		}
	}

	die("Unknown compare key:%s\n", sort_key);
}

static void insert_to_result(struct lock_stat *st,
			     int (*bigger)(struct lock_stat *, struct lock_stat *))
{
	struct rb_node **rb = &result.rb_node;
	struct rb_node *parent = NULL;
	struct lock_stat *p;

	while (*rb) {
		p = container_of(*rb, struct lock_stat, rb);
		parent = *rb;

		if (bigger(st, p))
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&st->rb, parent, rb);
	rb_insert_color(&st->rb, &result);
}

/* return the leftmost element of result and erase it */
static struct lock_stat *pop_from_result(void)
{
	struct rb_node *node = result.rb_node;

	if (!node)
		return NULL;

	while (node->rb_left)
		node = node->rb_left;

	rb_erase(node, &result);
	return container_of(node, struct lock_stat, rb);
}

static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
{
	struct list_head *entry = lockhashentry(addr);
	struct lock_stat *ret, *new;

	list_for_each_entry(ret, entry, hash_entry) {
		if (ret->addr == addr)
			return ret;
	}

	new = zalloc(sizeof(struct lock_stat));
	if (!new)
		goto alloc_failed;

	new->addr = addr;
	new->name = zalloc(sizeof(char) * strlen(name) + 1);
	if (!new->name)
		goto alloc_failed;
	strcpy(new->name, name);

	/* LOCK_STATE_UNLOCKED == 0 isn't guaranteed forever */
	new->state = LOCK_STATE_UNLOCKED;
	new->wait_time_min = ULLONG_MAX;

	list_add(&new->hash_entry, entry);
	return new;

alloc_failed:
	die("memory allocation failed\n");
}

static char const		*input_name = "perf.data";

static int			profile_cpu = -1;

struct raw_event_sample {
	u32			size;
	char			data[0];
};

struct trace_acquire_event {
	void			*addr;
	const char		*name;
};

struct trace_acquired_event {
	void			*addr;
	const char		*name;
};

struct trace_contended_event {
	void			*addr;
	const char		*name;
};

struct trace_release_event {
	void			*addr;
	const char		*name;
};

struct trace_lock_handler {
	void (*acquire_event)(struct trace_acquire_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*acquired_event)(struct trace_acquired_event *,
			       struct event *,
			       int cpu,
			       u64 timestamp,
			       struct thread *thread);

	void (*contended_event)(struct trace_contended_event *,
				struct event *,
				int cpu,
				u64 timestamp,
				struct thread *thread);

	void (*release_event)(struct trace_release_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);
};

static void
report_lock_acquire_event(struct trace_acquire_event *acquire_event,
			  struct event *__event __used,
			  int cpu __used,
			  u64 timestamp,
			  struct thread *thread __used)
{
	struct lock_stat *st;

	st = lock_stat_findnew(acquire_event->addr, acquire_event->name);

	switch (st->state) {
	case LOCK_STATE_UNLOCKED:
		break;
	case LOCK_STATE_LOCKED:
		break;
	default:
		BUG_ON(1);
		break;
	}

	st->prev_event_time = timestamp;
}

static void
report_lock_acquired_event(struct trace_acquired_event *acquired_event,
			   struct event *__event __used,
			   int cpu __used,
			   u64 timestamp,
			   struct thread *thread __used)
{
	struct lock_stat *st;

	st = lock_stat_findnew(acquired_event->addr, acquired_event->name);

	switch (st->state) {
	case LOCK_STATE_UNLOCKED:
		st->state = LOCK_STATE_LOCKED;
		st->nr_acquired++;
		break;
	case LOCK_STATE_LOCKED:
		break;
	default:
		BUG_ON(1);
		break;
	}

	st->prev_event_time = timestamp;
}

static void
report_lock_contended_event(struct trace_contended_event *contended_event,
			    struct event *__event __used,
			    int cpu __used,
			    u64 timestamp,
			    struct thread *thread __used)
{
	struct lock_stat *st;

	st = lock_stat_findnew(contended_event->addr, contended_event->name);

	switch (st->state) {
	case LOCK_STATE_UNLOCKED:
		break;
	case LOCK_STATE_LOCKED:
		st->nr_contended++;
		break;
	default:
		BUG_ON(1);
		break;
	}

	st->prev_event_time = timestamp;
}

static void
report_lock_release_event(struct trace_release_event *release_event,
			  struct event *__event __used,
			  int cpu __used,
			  u64 timestamp,
			  struct thread *thread __used)
{
	struct lock_stat *st;
	u64 hold_time;

	st = lock_stat_findnew(release_event->addr, release_event->name);

	switch (st->state) {
	case LOCK_STATE_UNLOCKED:
		break;
	case LOCK_STATE_LOCKED:
		st->state = LOCK_STATE_UNLOCKED;
		hold_time = timestamp - st->prev_event_time;

		if (timestamp < st->prev_event_time) {
			/* unfortunately this can happen: out-of-order timestamps */
			goto end;
		}

		if (st->wait_time_min > hold_time)
			st->wait_time_min = hold_time;
		if (st->wait_time_max < hold_time)
			st->wait_time_max = hold_time;
		st->wait_time_total += hold_time;

		st->nr_release++;
		break;
	default:
		BUG_ON(1);
		break;
	}

end:
	st->prev_event_time = timestamp;
}

/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops = {
	.acquire_event		= report_lock_acquire_event,
	.acquired_event		= report_lock_acquired_event,
	.contended_event	= report_lock_contended_event,
	.release_event		= report_lock_release_event,
};

static struct trace_lock_handler *trace_handler;

static void
process_lock_acquire_event(void *data,
			   struct event *event __used,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_acquire_event acquire_event;
	u64 tmp;		/* required for the cast below */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&acquire_event.addr, &tmp, sizeof(void *));
	acquire_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->acquire_event)
		trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
}

static void
process_lock_acquired_event(void *data,
			    struct event *event __used,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_acquired_event acquired_event;
	u64 tmp;		/* required for the cast below */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&acquired_event.addr, &tmp, sizeof(void *));
	acquired_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->acquire_event)
		trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
}

static void
process_lock_contended_event(void *data,
			     struct event *event __used,
			     int cpu __used,
			     u64 timestamp __used,
			     struct thread *thread __used)
{
	struct trace_contended_event contended_event;
	u64 tmp;		/* required for the cast below */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&contended_event.addr, &tmp, sizeof(void *));
	contended_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->acquire_event)
		trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
}

static void
process_lock_release_event(void *data,
			   struct event *event __used,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_release_event release_event;
	u64 tmp;		/* required for the cast below */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&release_event.addr, &tmp, sizeof(void *));
	release_event.name = (char *)raw_field_ptr(event, "name", data);

	if (trace_handler->acquire_event)
		trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
}

static void
process_raw_event(void *data, int cpu,
		  u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "lock_acquire"))
		process_lock_acquire_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "lock_acquired"))
		process_lock_acquired_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "lock_contended"))
		process_lock_contended_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "lock_release"))
		process_lock_release_event(data, event, cpu, timestamp, thread);
}

static int process_sample_event(event_t *event, struct perf_session *session)
{
	struct thread *thread;
	struct sample_data data;

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, session->sample_type, &data);
	thread = perf_session__findnew(session, data.pid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (profile_cpu != -1 && profile_cpu != (int) data.cpu)
		return 0;

	process_raw_event(data.raw_data, data.cpu, data.time, thread);

	return 0;
}

/* TODO: various ways to print, coloring, nano or milli sec */
static void print_result(void)
{
	struct lock_stat *st;
	char cut_name[20];

	printf("%18s ", "ID");
	printf("%20s ", "Name");
	printf("%10s ", "acquired");
	printf("%10s ", "contended");

	printf("%15s ", "total wait (ns)");
	printf("%15s ", "max wait (ns)");
	printf("%15s ", "min wait (ns)");

	printf("\n\n");

	while ((st = pop_from_result())) {
		bzero(cut_name, 20);

		printf("%p ", st->addr);

		if (strlen(st->name) < 16) {
			/* output raw name */
			printf("%20s ", st->name);
		} else {
			strncpy(cut_name, st->name, 16);
			cut_name[16] = '.';
			cut_name[17] = '.';
			cut_name[18] = '.';
			cut_name[19] = '\0';
			/* truncate the name to keep the output aligned */
			printf("%20s ", cut_name);
		}

		printf("%10u ", st->nr_acquired);
		printf("%10u ", st->nr_contended);

		printf("%15llu ", st->wait_time_total);
		printf("%15llu ", st->wait_time_max);
		printf("%15llu ", st->wait_time_min == ULLONG_MAX ?
		       0 : st->wait_time_min);
		printf("\n");
	}
}

static void dump_map(void)
{
	unsigned int i;
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			printf("%p: %s\n", st->addr, st->name);
		}
	}
}

static struct perf_event_ops eops = {
	.sample			= process_sample_event,
	.comm			= event__process_comm,
};

static struct perf_session *session;

static int read_events(void)
{
	session = perf_session__new(input_name, O_RDONLY, 0);
	if (!session)
		die("Initializing perf session failed\n");

	return perf_session__process_events(session, &eops);
}

static void sort_result(void)
{
	unsigned int i;
	struct lock_stat *st;

	for (i = 0; i < LOCKHASH_SIZE; i++) {
		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
			insert_to_result(st, compare);
		}
	}
}

static void __cmd_report(void)
{
	setup_pager();
	select_key();
	read_events();
	sort_result();
	print_result();
}

static const char * const report_usage[] = {
	"perf lock report [<options>]",
	NULL
};

static const struct option report_options[] = {
	OPT_STRING('k', "key", &sort_key, "acquired",
		   "key for sorting"),
	/* TODO: type */
	OPT_END()
};

static const char * const lock_usage[] = {
	"perf lock [<options>] {record|trace|report}",
	NULL
};

static const struct option lock_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
	OPT_END()
};

static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "lock:lock_acquire:r",
	"-e", "lock:lock_acquired:r",
	"-e", "lock:lock_contended:r",
	"-e", "lock:lock_release:r",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_lock(int argc, const char **argv, const char *prefix __used)
{
	unsigned int i;

	symbol__init();
	for (i = 0; i < LOCKHASH_SIZE; i++)
		INIT_LIST_HEAD(lockhash_table + i);

	argc = parse_options(argc, argv, lock_options, lock_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(lock_usage, lock_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "report", 6)) {
		trace_handler = &report_lock_ops;
		if (argc) {
			argc = parse_options(argc, argv,
					     report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		__cmd_report();
	} else if (!strcmp(argv[0], "trace")) {
		/* Aliased to 'perf trace' */
		return cmd_trace(argc, argv, prefix);
	} else if (!strcmp(argv[0], "map")) {
		/* recycling report_lock_ops */
		trace_handler = &report_lock_ops;
		setup_pager();
		read_events();
		dump_map();
	} else {
		usage_with_options(lock_usage, lock_options);
	}

	return 0;
}
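One nit worth flagging in the new file: all four process_lock_*_event() wrappers guard the dispatch with `if (trace_handler->acquire_event)`, so the acquired, contended and release callbacks are tested against the wrong member. It is harmless here because report_lock_ops fills in all four handlers, but the intended guards are presumably per-callback, e.g. for the acquired case:

	if (trace_handler->acquired_event)
		trace_handler->acquired_event(&acquired_event, event, cpu,
					      timestamp, thread);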
@ -41,7 +41,6 @@
#include "util/debugfs.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/session.h"
#include "util/parse-options.h"
#include "util/parse-events.h"	/* For debugfs_path */
#include "util/probe-finder.h"
@ -55,11 +54,13 @@ static struct {
	bool need_dwarf;
	bool list_events;
	bool force_add;
	bool show_lines;
	int nr_probe;
	struct probe_point probes[MAX_PROBES];
	struct strlist *dellist;
	struct perf_session *psession;
	struct map *kmap;
	struct map_groups kmap_groups;
	struct map *kmaps[MAP__NR_TYPES];
	struct line_range line_range;
} session;


@ -120,8 +121,8 @@ static int opt_del_probe_event(const struct option *opt __used,
static void evaluate_probe_point(struct probe_point *pp)
{
	struct symbol *sym;
	sym = map__find_symbol_by_name(session.kmap, pp->function,
				       session.psession, NULL);
	sym = map__find_symbol_by_name(session.kmaps[MAP__FUNCTION],
				       pp->function, NULL);
	if (!sym)
		die("Kernel symbol \'%s\' not found - probe not added.",
		    pp->function);
@ -130,12 +131,23 @@ static void evaluate_probe_point(struct probe_point *pp)
#ifndef NO_LIBDWARF
static int open_vmlinux(void)
{
	if (map__load(session.kmap, session.psession, NULL) < 0) {
	if (map__load(session.kmaps[MAP__FUNCTION], NULL) < 0) {
		pr_debug("Failed to load kernel map.\n");
		return -EINVAL;
	}
	pr_debug("Try to open %s\n", session.kmap->dso->long_name);
	return open(session.kmap->dso->long_name, O_RDONLY);
	pr_debug("Try to open %s\n",
		 session.kmaps[MAP__FUNCTION]->dso->long_name);
	return open(session.kmaps[MAP__FUNCTION]->dso->long_name, O_RDONLY);
}

static int opt_show_lines(const struct option *opt __used,
			  const char *str, int unset __used)
{
	if (str)
		parse_line_range_desc(str, &session.line_range);
	INIT_LIST_HEAD(&session.line_range.line_list);
	session.show_lines = true;
	return 0;
}
#endif

@ -144,6 +156,7 @@ static const char * const probe_usage[] = {
	"perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
	"perf probe [<options>] --del '[GROUP:]EVENT' ...",
	"perf probe --list",
	"perf probe --line 'LINEDESC'",
	NULL
};

@ -182,9 +195,31 @@ static const struct option options[] = {
		     opt_add_probe_event),
	OPT_BOOLEAN('f', "force", &session.force_add, "forcibly add events"
		    " with existing name"),
#ifndef NO_LIBDWARF
	OPT_CALLBACK('L', "line", NULL,
		     "FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]",
		     "Show source code lines.", opt_show_lines),
#endif
	OPT_END()
};

/* Initialize symbol maps for vmlinux */
static void init_vmlinux(void)
{
	symbol_conf.sort_by_name = true;
	if (symbol_conf.vmlinux_name == NULL)
		symbol_conf.try_vmlinux_path = true;
	else
		pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
	if (symbol__init() < 0)
		die("Failed to init symbol map.");

	map_groups__init(&session.kmap_groups);
	if (map_groups__create_kernel_maps(&session.kmap_groups,
					   session.kmaps) < 0)
		die("Failed to create kernel maps.");
}

int cmd_probe(int argc, const char **argv, const char *prefix __used)
{
	int i, ret;
@ -203,7 +238,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
		parse_probe_event_argv(argc, argv);
	}

	if ((!session.nr_probe && !session.dellist && !session.list_events))
	if ((!session.nr_probe && !session.dellist && !session.list_events &&
	     !session.show_lines))
		usage_with_options(probe_usage, options);

	if (debugfs_valid_mountpoint(debugfs_path) < 0)
@ -215,10 +251,34 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
			       " --add/--del.\n");
			usage_with_options(probe_usage, options);
		}
		if (session.show_lines) {
			pr_warning(" Error: Don't use --list with --line.\n");
			usage_with_options(probe_usage, options);
		}
		show_perf_probe_events();
		return 0;
	}

#ifndef NO_LIBDWARF
	if (session.show_lines) {
		if (session.nr_probe != 0 || session.dellist) {
			pr_warning(" Error: Don't use --line with"
				   " --add/--del.\n");
			usage_with_options(probe_usage, options);
		}
		init_vmlinux();
		fd = open_vmlinux();
		if (fd < 0)
			die("Could not open debuginfo file.");
		ret = find_line_range(fd, &session.line_range);
		if (ret <= 0)
			die("Source line is not found.\n");
		close(fd);
		show_line_range(&session.line_range);
		return 0;
	}
#endif

	if (session.dellist) {
		del_trace_kprobe_events(session.dellist);
		strlist__delete(session.dellist);
@ -226,20 +286,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
		return 0;
	}

	/* Initialize symbol maps for vmlinux */
	symbol_conf.sort_by_name = true;
	if (symbol_conf.vmlinux_name == NULL)
		symbol_conf.try_vmlinux_path = true;
	if (symbol__init() < 0)
		die("Failed to init symbol map.");
	session.psession = perf_session__new(NULL, O_WRONLY, false);
	if (session.psession == NULL)
		die("Failed to init perf_session.");
	session.kmap = map_groups__find_by_name(&session.psession->kmaps,
						MAP__FUNCTION,
						"[kernel.kallsyms]");
	if (!session.kmap)
		die("Could not find kernel map.\n");
	/* Add probes */
	init_vmlinux();

	if (session.need_dwarf)
#ifdef NO_LIBDWARF
@ -5,10 +5,13 @@
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#define _FILE_OFFSET_BITS 64

#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
@ -62,6 +65,7 @@ static int nr_poll = 0;
static int nr_cpu = 0;

static int file_new = 1;
static off_t post_processing_offset;

static struct perf_session *session;

@ -111,22 +115,10 @@ static void write_output(void *buf, size_t size)
	}
}

static void write_event(event_t *buf, size_t size)
{
	/*
	 * Add it to the list of DSOs, so that when we finish this
	 * record session we can pick the available build-ids.
	 */
	if (buf->header.type == PERF_RECORD_MMAP)
		dsos__findnew(buf->mmap.filename);

	write_output(buf, size);
}

static int process_synthesized_event(event_t *event,
				     struct perf_session *self __used)
{
	write_event(event, event->header.size);
	write_output(event, event->header.size);
	return 0;
}

@ -178,14 +170,14 @@ static void mmap_read(struct mmap_data *md)
		size = md->mask + 1 - (old & md->mask);
		old += size;

		write_event(buf, size);
		write_output(buf, size);
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	write_event(buf, size);
	write_output(buf, size);

	md->prev = old;
	mmap_write_tail(md, old);
@ -395,10 +387,21 @@ static void open_counters(int cpu, pid_t pid)
	nr_cpu++;
}

static int process_buildids(void)
{
	u64 size = lseek(output, 0, SEEK_CUR);

	session->fd = output;
	return __perf_session__process_events(session, post_processing_offset,
					      size - post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}

static void atexit_header(void)
{
	session->header.data_size += bytes_written;

	process_buildids();
	perf_header__write(&session->header, output, true);
}

@ -551,8 +554,23 @@ static int __cmd_record(int argc, const char **argv)
		return err;
	}

	post_processing_offset = lseek(output, 0, SEEK_CUR);

	err = event__synthesize_kernel_mmap(process_synthesized_event,
					    session, "_text");
	if (err < 0) {
		pr_err("Couldn't record kernel reference relocation symbol.\n");
		return err;
	}

	err = event__synthesize_modules(process_synthesized_event, session);
	if (err < 0) {
		pr_err("Couldn't record kernel reference relocation symbol.\n");
		return err;
	}

	if (!system_wide && profile_cpu == -1)
		event__synthesize_thread(pid, process_synthesized_event,
		event__synthesize_thread(target_pid, process_synthesized_event,
					 session);
	else
		event__synthesize_threads(process_synthesized_event, session);
@ -34,6 +34,8 @@
static char const *input_name = "perf.data";

static int force;
static bool hide_unresolved;
static bool dont_use_callchains;

static int show_threads;
static struct perf_read_values show_threads_values;
@ -91,11 +93,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)

	event__parse_sample(event, session->sample_type, &data);

	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
		    event->header.misc,
		    data.pid, data.tid,
		    (void *)(long)data.ip,
		    (long long)data.period);
	dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
		    data.pid, data.tid, data.ip, data.period);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
		unsigned int i;
@ -121,7 +120,7 @@ static int process_sample_event(event_t *event, struct perf_session *session)
		return -1;
	}

	if (al.filtered)
	if (al.filtered || (hide_unresolved && al.sym == NULL))
		return 0;

	if (perf_session__add_hist_entry(session, &al, data.callchain, data.period)) {
@ -156,14 +155,14 @@ static int process_read_event(event_t *event, struct perf_session *session __use
	return 0;
}

static int sample_type_check(struct perf_session *session)
static int perf_session__setup_sample_type(struct perf_session *self)
{
	if (!(session->sample_type & PERF_SAMPLE_CALLCHAIN)) {
	if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
		if (sort__has_parent) {
			fprintf(stderr, "selected --sort parent, but no"
				" callchain data. Did you call"
				" perf record without -g?\n");
			return -1;
			return -EINVAL;
		}
		if (symbol_conf.use_callchain) {
			fprintf(stderr, "selected -g but no callchain data."
@ -171,12 +170,13 @@ static int sample_type_check(struct perf_session *session)
				" -g?\n");
			return -1;
		}
	} else if (callchain_param.mode != CHAIN_NONE && !symbol_conf.use_callchain) {
	} else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE &&
		   !symbol_conf.use_callchain) {
			symbol_conf.use_callchain = true;
			if (register_callchain_param(&callchain_param) < 0) {
				fprintf(stderr, "Can't register callchain"
					" params\n");
				return -1;
				return -EINVAL;
			}
	}

@ -184,20 +184,18 @@ static int sample_type_check(struct perf_session *session)
}

static struct perf_event_ops event_ops = {
	.process_sample_event = process_sample_event,
	.process_mmap_event = event__process_mmap,
	.process_comm_event = event__process_comm,
	.process_exit_event = event__process_task,
	.process_fork_event = event__process_task,
	.process_lost_event = event__process_lost,
	.process_read_event = process_read_event,
	.sample_type_check = sample_type_check,
	.sample	= process_sample_event,
	.mmap	= event__process_mmap,
	.comm	= event__process_comm,
	.exit	= event__process_task,
	.fork	= event__process_task,
	.lost	= event__process_lost,
	.read	= process_read_event,
};


static int __cmd_report(void)
{
	int ret;
	int ret = -EINVAL;
	struct perf_session *session;

	session = perf_session__new(input_name, O_RDONLY, force);
@ -207,6 +205,10 @@ static int __cmd_report(void)
	if (show_threads)
		perf_read_values_init(&show_threads_values);

	ret = perf_session__setup_sample_type(session);
	if (ret)
		goto out_delete;

	ret = perf_session__process_events(session, &event_ops);
	if (ret)
		goto out_delete;
@ -243,11 +245,19 @@ out_delete:

static int
parse_callchain_opt(const struct option *opt __used, const char *arg,
		    int unset __used)
		    int unset)
{
	char *tok;
	char *endptr;

	/*
	 * --no-call-graph
	 */
	if (unset) {
		dont_use_callchains = true;
		return 0;
	}

	symbol_conf.use_callchain = true;

	if (!arg)
@ -319,7 +329,7 @@ static const struct option options[] = {
		   "pretty printing style key: normal raw"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol, parent"),
	OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths,
	OPT_BOOLEAN('P', "full-paths", &symbol_conf.full_paths,
		    "Don't shorten the pathnames taking into account the cwd"),
	OPT_STRING('p', "parent", &parent_pattern, "regex",
		   "regex filter to identify parent, see: '--sort parent'"),
@ -340,6 +350,8 @@ static const struct option options[] = {
	OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
		   "separator for columns, no spaces will be added between "
		   "columns '.' is reserved."),
	OPT_BOOLEAN('U', "hide-unresolved", &hide_unresolved,
		    "Only display entries resolved to a symbol"),
	OPT_END()
};

@ -1621,11 +1621,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)

	event__parse_sample(event, session->sample_type, &data);

	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
		    event->header.misc,
		    data.pid, data.tid,
		    (void *)(long)data.ip,
		    (long long)data.period);
	dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
		    data.pid, data.tid, data.ip, data.period);

	thread = perf_session__findnew(session, data.pid);
	if (thread == NULL) {
@ -1653,33 +1650,22 @@ static int process_lost_event(event_t *event __used,
	return 0;
}

static int sample_type_check(struct perf_session *session __used)
{
	if (!(session->sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr,
			"No trace sample to read. Did you call perf record "
			"without -R?");
		return -1;
	}

	return 0;
}

static struct perf_event_ops event_ops = {
	.process_sample_event = process_sample_event,
	.process_comm_event = event__process_comm,
	.process_lost_event = process_lost_event,
	.sample_type_check = sample_type_check,
	.sample	= process_sample_event,
	.comm	= event__process_comm,
	.lost	= process_lost_event,
};

static int read_events(void)
{
	int err;
	int err = -EINVAL;
	struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
	if (session == NULL)
		return -ENOMEM;

	err = perf_session__process_events(session, &event_ops);
	if (perf_session__has_traces(session, "record -R"))
		err = perf_session__process_events(session, &event_ops);

	perf_session__delete(session);
	return err;
}

@ -44,6 +44,7 @@
#include "util/parse-events.h"
#include "util/event.h"
#include "util/debug.h"
#include "util/header.h"

#include <sys/prctl.h>
#include <math.h>
@ -79,6 +80,8 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS];

static int event_scaled[MAX_COUNTERS];

static volatile int done = 0;

struct stats
{
	double n, mean, M2;
@ -247,61 +250,64 @@ static int run_perf_stat(int argc __used, const char **argv)
	unsigned long long t0, t1;
	int status = 0;
	int counter;
	int pid;
	int pid = target_pid;
	int child_ready_pipe[2], go_pipe[2];
	const bool forks = (target_pid == -1 && argc > 0);
	char buf;

	if (!system_wide)
		nr_cpus = 1;

	if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) {
	if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
		perror("failed to create pipes");
		exit(1);
	}

	if ((pid = fork()) < 0)
		perror("failed to fork");
	if (forks) {
		if ((pid = fork()) < 0)
			perror("failed to fork");

	if (!pid) {
		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
		if (!pid) {
			close(child_ready_pipe[0]);
			close(go_pipe[1]);
			fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

			/*
			 * Do a dummy execvp to get the PLT entry resolved,
			 * so we avoid the resolver overhead on the real
			 * execvp call.
			 */
			execvp("", (char **)argv);

			/*
			 * Tell the parent we're ready to go
			 */
			close(child_ready_pipe[1]);

			/*
			 * Wait until the parent tells us to go.
			 */
			if (read(go_pipe[0], &buf, 1) == -1)
				perror("unable to read pipe");

			execvp(argv[0], (char **)argv);

			perror(argv[0]);
			exit(-1);
		}

		child_pid = pid;

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 * Wait for the child to be ready to exec.
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &buf, 1) == -1)
		close(go_pipe[0]);
		if (read(child_ready_pipe[0], &buf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		exit(-1);
		close(child_ready_pipe[0]);
	}

	child_pid = pid;

	/*
	 * Wait for the child to be ready to exec.
	 */
	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	if (read(child_ready_pipe[0], &buf, 1) == -1)
		perror("unable to read pipe");
	close(child_ready_pipe[0]);

	for (counter = 0; counter < nr_counters; counter++)
		create_perf_stat_counter(counter, pid);

@ -310,8 +316,12 @@ static int run_perf_stat(int argc __used, const char **argv)
	 */
	t0 = rdclock();

	close(go_pipe[1]);
	wait(&status);
	if (forks) {
		close(go_pipe[1]);
		wait(&status);
	} else {
		while(!done);
	}

	t1 = rdclock();

@ -417,10 +427,13 @@ static void print_stat(int argc, const char **argv)
	fflush(stdout);

	fprintf(stderr, "\n");
	fprintf(stderr, " Performance counter stats for \'%s", argv[0]);

	for (i = 1; i < argc; i++)
		fprintf(stderr, " %s", argv[i]);
	fprintf(stderr, " Performance counter stats for ");
	if(target_pid == -1) {
		fprintf(stderr, "\'%s", argv[0]);
		for (i = 1; i < argc; i++)
			fprintf(stderr, " %s", argv[i]);
	}else
		fprintf(stderr, "task pid \'%d", target_pid);

	fprintf(stderr, "\'");
	if (run_count > 1)
@ -445,6 +458,9 @@ static volatile int signr = -1;

static void skip_signal(int signo)
{
	if(target_pid != -1)
		done = 1;

	signr = signo;
}

@ -461,7 +477,7 @@ static void sig_atexit(void)
}

static const char * const stat_usage[] = {
	"perf stat [<options>] <command>",
	"perf stat [<options>] [<command>]",
	NULL
};

@ -492,7 +508,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)

	argc = parse_options(argc, argv, options, stat_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
	if (!argc && target_pid == -1)
		usage_with_options(stat_usage, options);
	if (run_count <= 0)
		usage_with_options(stat_usage, options);
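The run_perf_stat() rework makes the whole fork/exec handshake conditional on `forks`, i.e. on having a command to launch; with -p the tool now just counts until interrupted. The handshake itself is a classic double-pipe pattern, sketched standalone below with error handling trimmed: the child signals readiness by closing the write end of one pipe, then blocks on a second pipe until the parent has set up its counters.

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <sys/wait.h>

	int main(void)
	{
		int child_ready[2], go[2];
		char buf;
		pid_t pid;

		if (pipe(child_ready) < 0 || pipe(go) < 0)
			exit(1);

		pid = fork();
		if (pid == 0) {
			close(child_ready[0]);
			close(go[1]);
			close(child_ready[1]);		/* tell the parent we're ready */
			if (read(go[0], &buf, 1) < 0)	/* wait for the go signal (EOF) */
				_exit(126);
			execlp("true", "true", (char *)NULL);
			_exit(127);
		}

		close(child_ready[1]);
		close(go[0]);
		if (read(child_ready[0], &buf, 1) < 0)	/* returns 0 at EOF: child ready */
			perror("read");
		/* ... set up counters on pid here ... */
		close(go[1]);				/* EOF on go[0] releases the child */
		wait(NULL);
		return 0;
	}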
@ -1029,33 +1029,24 @@ static void process_samples(struct perf_session *session)
	}
}

static int sample_type_check(struct perf_session *session)
{
	if (!(session->sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr, "No trace samples found in the file.\n"
			"Have you used 'perf timechart record' to record it?\n");
		return -1;
	}

	return 0;
}

static struct perf_event_ops event_ops = {
	.process_comm_event = process_comm_event,
	.process_fork_event = process_fork_event,
	.process_exit_event = process_exit_event,
	.process_sample_event = queue_sample_event,
	.sample_type_check = sample_type_check,
	.comm	= process_comm_event,
	.fork	= process_fork_event,
	.exit	= process_exit_event,
	.sample	= queue_sample_event,
};

static int __cmd_timechart(void)
{
	struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
	int ret;
	int ret = -EINVAL;

	if (session == NULL)
		return -ENOMEM;

	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	ret = perf_session__process_events(session, &event_ops);
	if (ret)
		goto out_delete;
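This is the same migration seen in builtin-kmem.c and the trace reader above: the per-tool `.sample_type_check` callback goes away, and each command calls perf_session__has_traces() up front instead, centralizing the PERF_SAMPLE_RAW check and its diagnostics. The resulting call shape shared by the converted tools (the second argument naming the recording hint varies per tool):

	struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);

	if (session == NULL)
		return -ENOMEM;

	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	ret = perf_session__process_events(session, &event_ops);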
@ -94,6 +94,7 @@ struct source_line {

static char *sym_filter = NULL;
struct sym_entry *sym_filter_entry = NULL;
struct sym_entry *sym_filter_entry_sched = NULL;
static int sym_pcnt_filter = 5;
static int sym_counter = 0;
static int display_weighted = -1;
@ -201,10 +202,9 @@ static void parse_source(struct sym_entry *syme)
	len = sym->end - sym->start;

	sprintf(command,
		"objdump --start-address=0x%016Lx "
		"--stop-address=0x%016Lx -dS %s",
		map->unmap_ip(map, sym->start),
		map->unmap_ip(map, sym->end), path);
		"objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);

	file = popen(command, "r");
	if (!file)
@ -215,7 +215,7 @@ static void parse_source(struct sym_entry *syme)
	while (!feof(file)) {
		struct source_line *src;
		size_t dummy = 0;
		char *c;
		char *c, *sep;

		src = malloc(sizeof(struct source_line));
		assert(src != NULL);
@ -234,14 +234,11 @@ static void parse_source(struct sym_entry *syme)
		*source->lines_tail = src;
		source->lines_tail = &src->next;

		if (strlen(src->line)>8 && src->line[8] == ':') {
			src->eip = strtoull(src->line, NULL, 16);
			src->eip = map->unmap_ip(map, src->eip);
		}
		if (strlen(src->line)>8 && src->line[16] == ':') {
			src->eip = strtoull(src->line, NULL, 16);
			src->eip = map->unmap_ip(map, src->eip);
		}
		src->eip = strtoull(src->line, &sep, 16);
		if (*sep == ':')
			src->eip = map__objdump_2ip(map, src->eip);
		else /* this line has no ip info (e.g. source line) */
			src->eip = 0;
	}
	pclose(file);
out_assign:
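The strtoull()-with-endptr idiom above does double duty: it parses the leading address of an objdump line and, via the character at *sep, decides whether the line had an address at all, replacing the brittle fixed-column checks. A standalone illustration:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const char *lines[] = {
			"ffffffff810030a0:	55	push   %rbp",	/* disassembly */
			"	return 0;",				/* plain source text */
		};
		char *sep;
		unsigned long long eip;

		for (int i = 0; i < 2; i++) {
			/* on a non-hex prefix, strtoull() returns 0 and
			 * leaves sep pointing at the start of the input */
			eip = strtoull(lines[i], &sep, 16);
			if (*sep == ':')
				printf("ip line: %#llx\n", eip);
			else
				printf("no ip info: %s\n", lines[i]);
		}
		return 0;
	}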
@ -276,6 +273,9 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
		goto out_unlock;

	for (line = syme->src->lines; line; line = line->next) {
		/* skip lines without IP info */
		if (line->eip == 0)
			continue;
		if (line->eip == ip) {
			line->count[counter]++;
			break;
@ -287,17 +287,20 @@ out_unlock:
	pthread_mutex_unlock(&syme->src->lock);
}

#define PATTERN_LEN (BITS_PER_LONG / 4 + 2)

static void lookup_sym_source(struct sym_entry *syme)
{
	struct symbol *symbol = sym_entry__symbol(syme);
	struct source_line *line;
	char pattern[PATH_MAX];
	char pattern[PATTERN_LEN + 1];

	sprintf(pattern, "<%s>:", symbol->name);
	sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
		map__rip_2objdump(syme->map, symbol->start));

	pthread_mutex_lock(&syme->src->lock);
	for (line = syme->src->lines; line; line = line->next) {
		if (strstr(line->line, pattern)) {
		if (memcmp(line->line, pattern, PATTERN_LEN) == 0) {
			syme->src->source = line;
			break;
		}
@ -667,7 +670,7 @@ static void prompt_symbol(struct sym_entry **target, const char *msg)
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", sym_filter);
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
		return;
	} else
@ -695,11 +698,9 @@ static void print_mapped_keys(void)

	fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter);

	if (symbol_conf.vmlinux_name) {
		fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
		fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
		fprintf(stdout, "\t[S] stop annotation.\n");
	}
	fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
	fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S] stop annotation.\n");

	if (nr_counters > 1)
		fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);
@ -725,14 +726,13 @@ static int key_mapped(int c)
	case 'Q':
	case 'K':
	case 'U':
	case 'F':
	case 's':
	case 'S':
		return 1;
	case 'E':
	case 'w':
		return nr_counters > 1 ? 1 : 0;
	case 'F':
	case 's':
	case 'S':
		return symbol_conf.vmlinux_name ? 1 : 0;
	default:
		break;
	}
@ -910,8 +910,12 @@ static int symbol_filter(struct map *map, struct symbol *sym)
	syme = symbol__priv(sym);
	syme->map = map;
	syme->src = NULL;
	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter))
		sym_filter_entry = syme;

	if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
		/* schedule initial sym_filter_entry setup */
		sym_filter_entry_sched = syme;
		sym_filter = NULL;
	}

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
@ -934,8 +938,11 @@ static void event__process_sample(const event_t *self,
	struct addr_location al;
	u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	++samples;

	switch (origin) {
	case PERF_RECORD_MISC_USER:
		++userspace_samples;
		if (hide_user_symbols)
			return;
		break;
@ -948,9 +955,38 @@ static void event__process_sample(const event_t *self,
	}

	if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 ||
	    al.sym == NULL || al.filtered)
	    al.filtered)
		return;

	if (al.sym == NULL) {
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (al.map == session->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			pr_err("The %s file can't be used\n",
			       symbol_conf.vmlinux_name);
			exit(1);
		}

		return;
	}

	/* let's see, whether we need to install initial sym_filter_entry */
	if (sym_filter_entry_sched) {
		sym_filter_entry = sym_filter_entry_sched;
		sym_filter_entry_sched = NULL;
		parse_source(sym_filter_entry);
	}

	syme = symbol__priv(al.sym);
	if (!syme->skip) {
		syme->count[counter]++;
@ -960,9 +996,6 @@ static void event__process_sample(const event_t *self,
		if (list_empty(&syme->node) || !syme->node.next)
			__list_insert_active_sym(syme);
		pthread_mutex_unlock(&active_symbols_lock);
		if (origin == PERF_RECORD_MISC_USER)
			++userspace_samples;
		++samples;
	}
}

@ -975,6 +1008,10 @@ static int event__process(event_t *event, struct perf_session *session)
	case PERF_RECORD_MMAP:
		event__process_mmap(event, session);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		event__process_task(event, session);
		break;
	default:
		break;
	}
@ -1244,7 +1281,7 @@ static const struct option options[] = {
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		   "symbol to annotate - requires -k option"),
		   "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &freq,
@ -1280,16 +1317,14 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)

	symbol_conf.priv_size = (sizeof(struct sym_entry) +
				 (nr_counters + 1) * sizeof(unsigned long));
	if (symbol_conf.vmlinux_name == NULL)
		symbol_conf.try_vmlinux_path = true;

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init() < 0)
		return -1;

	if (delay_secs < 1)
		delay_secs = 1;
|
||||
|
||||
parse_source(sym_filter_entry);
|
||||
|
||||
/*
|
||||
* User specified count overrides default frequency.
|
||||
*/
|
||||
|
@ -44,6 +44,7 @@ static void setup_scripting(void)
perf_set_argv_exec_path(perf_exec_path());

setup_perl_scripting();
setup_python_scripting();

scripting_ops = &default_scripting_ops;
}
@ -75,11 +76,8 @@ static int process_sample_event(event_t *event, struct perf_session *session)

event__parse_sample(event, session->sample_type, &data);

dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
event->header.misc,
data.pid, data.tid,
(void *)(long)data.ip,
(long long)data.period);
dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
data.pid, data.tid, data.ip, data.period);

thread = perf_session__findnew(session, event->ip.pid);
if (thread == NULL) {
@ -103,22 +101,9 @@ static int process_sample_event(event_t *event, struct perf_session *session)
return 0;
}

static int sample_type_check(struct perf_session *session)
{
if (!(session->sample_type & PERF_SAMPLE_RAW)) {
fprintf(stderr,
"No trace sample to read. Did you call perf record "
"without -R?");
return -1;
}

return 0;
}

static struct perf_event_ops event_ops = {
.process_sample_event = process_sample_event,
.process_comm_event = event__process_comm,
.sample_type_check = sample_type_check,
.sample = process_sample_event,
.comm = event__process_comm,
};

static int __cmd_trace(struct perf_session *session)
@ -235,9 +220,9 @@ static int parse_scriptname(const struct option *opt __used,
const char *script, *ext;
int len;

if (strcmp(str, "list") == 0) {
if (strcmp(str, "lang") == 0) {
list_available_languages();
return 0;
exit(0);
}

script = strchr(str, ':');
@ -531,6 +516,8 @@ static const struct option options[] = {
parse_scriptname),
OPT_STRING('g', "gen-script", &generate_script_lang, "lang",
"generate perf-trace.xx script in specified language"),
OPT_STRING('i', "input", &input_name, "file",
"input file name"),

OPT_END()
};
@ -592,6 +579,9 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
if (session == NULL)
return -ENOMEM;

if (!perf_session__has_traces(session, "record -R"))
return -EINVAL;

if (generate_script_lang) {
struct stat perf_stat;

@ -16,6 +16,7 @@ extern int check_pager_config(const char *cmd);

extern int cmd_annotate(int argc, const char **argv, const char *prefix);
extern int cmd_bench(int argc, const char **argv, const char *prefix);
extern int cmd_buildid_cache(int argc, const char **argv, const char *prefix);
extern int cmd_buildid_list(int argc, const char **argv, const char *prefix);
extern int cmd_diff(int argc, const char **argv, const char *prefix);
extern int cmd_help(int argc, const char **argv, const char *prefix);
@ -30,5 +31,6 @@ extern int cmd_trace(int argc, const char **argv, const char *prefix);
extern int cmd_version(int argc, const char **argv, const char *prefix);
extern int cmd_probe(int argc, const char **argv, const char *prefix);
extern int cmd_kmem(int argc, const char **argv, const char *prefix);
extern int cmd_lock(int argc, const char **argv, const char *prefix);

#endif

@ -3,7 +3,9 @@
# command name category [deprecated] [common]
#
perf-annotate mainporcelain common
perf-archive mainporcelain common
perf-bench mainporcelain common
perf-buildid-cache mainporcelain common
perf-buildid-list mainporcelain common
perf-diff mainporcelain common
perf-list mainporcelain common

@ -101,10 +101,10 @@ enum hw_event_ids {
*/
PERF_COUNT_HW_CPU_CYCLES = 0,
PERF_COUNT_HW_INSTRUCTIONS = 1,
PERF_COUNT_HW_CACHE_REFERENCES = 2,
PERF_COUNT_HW_CACHE_REFERENCES = 2,
PERF_COUNT_HW_CACHE_MISSES = 3,
PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
PERF_COUNT_HW_BRANCH_MISSES = 5,
PERF_COUNT_HW_BRANCH_MISSES = 5,
PERF_COUNT_HW_BUS_CYCLES = 6,
};

@ -131,8 +131,8 @@ software events, selected by 'event_id':
*/
enum sw_event_ids {
PERF_COUNT_SW_CPU_CLOCK = 0,
PERF_COUNT_SW_TASK_CLOCK = 1,
PERF_COUNT_SW_PAGE_FAULTS = 2,
PERF_COUNT_SW_TASK_CLOCK = 1,
PERF_COUNT_SW_PAGE_FAULTS = 2,
PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
PERF_COUNT_SW_CPU_MIGRATIONS = 4,
PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,

tools/perf/perf-archive.sh (new file)
@ -0,0 +1,32 @@
#!/bin/bash
# perf archive
# Arnaldo Carvalho de Melo <acme@redhat.com>

PERF_DATA=perf.data
if [ $# -ne 0 ] ; then
PERF_DATA=$1
fi

DEBUGDIR=~/.debug/
BUILDIDS=$(mktemp /tmp/perf-archive-buildids.XXXXXX)

perf buildid-list -i $PERF_DATA --with-hits > $BUILDIDS
if [ ! -s $BUILDIDS ] ; then
echo "perf archive: no build-ids found"
rm -f $BUILDIDS
exit 1
fi

MANIFEST=$(mktemp /tmp/perf-archive-manifest.XXXXXX)

cut -d ' ' -f 1 $BUILDIDS | \
while read build_id ; do
linkname=$DEBUGDIR.build-id/${build_id:0:2}/${build_id:2}
filename=$(readlink -f $linkname)
echo ${linkname#$DEBUGDIR} >> $MANIFEST
echo ${filename#$DEBUGDIR} >> $MANIFEST
done

tar cfj $PERF_DATA.tar.bz2 -C $DEBUGDIR -T $MANIFEST
rm -f $MANIFEST $BUILDIDS
exit 0

@ -48,7 +48,8 @@ int check_pager_config(const char *cmd)
return c.val;
}

static void commit_pager_choice(void) {
static void commit_pager_choice(void)
{
switch (use_pager) {
case 0:
setenv("PERF_PAGER", "cat", 1);
@ -70,7 +71,7 @@ static void set_debugfs_path(void)
"tracing/events");
}

static int handle_options(const char*** argv, int* argc, int* envchanged)
static int handle_options(const char ***argv, int *argc, int *envchanged)
{
int handled = 0;

@ -109,7 +110,7 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--perf-dir")) {
if (*argc < 2) {
fprintf(stderr, "No directory given for --perf-dir.\n" );
fprintf(stderr, "No directory given for --perf-dir.\n");
usage(perf_usage_string);
}
setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1);
@ -124,7 +125,7 @@ static int handle_options(const char*** argv, int* argc, int* envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--work-tree")) {
if (*argc < 2) {
fprintf(stderr, "No directory given for --work-tree.\n" );
fprintf(stderr, "No directory given for --work-tree.\n");
usage(perf_usage_string);
}
setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
@ -168,7 +169,7 @@ static int handle_alias(int *argcp, const char ***argv)
{
int envchanged = 0, ret = 0, saved_errno = errno;
int count, option_count;
const char** new_argv;
const char **new_argv;
const char *alias_command;
char *alias_string;

@ -210,11 +211,11 @@ static int handle_alias(int *argcp, const char ***argv)
if (!strcmp(alias_command, new_argv[0]))
die("recursive alias: %s", alias_command);

new_argv = realloc(new_argv, sizeof(char*) *
new_argv = realloc(new_argv, sizeof(char *) *
(count + *argcp + 1));
/* insert after command name */
memcpy(new_argv + count, *argv + 1, sizeof(char*) * *argcp);
new_argv[count+*argcp] = NULL;
memcpy(new_argv + count, *argv + 1, sizeof(char *) * *argcp);
new_argv[count + *argcp] = NULL;

*argv = new_argv;
*argcp += count - 1;
@ -285,6 +286,7 @@ static void handle_internal_command(int argc, const char **argv)
{
const char *cmd = argv[0];
static struct cmd_struct commands[] = {
{ "buildid-cache", cmd_buildid_cache, 0 },
{ "buildid-list", cmd_buildid_list, 0 },
{ "diff", cmd_diff, 0 },
{ "help", cmd_help, 0 },
@ -301,6 +303,7 @@ static void handle_internal_command(int argc, const char **argv)
{ "sched", cmd_sched, 0 },
{ "probe", cmd_probe, 0 },
{ "kmem", cmd_kmem, 0 },
{ "lock", cmd_lock, 0 },
};
unsigned int i;
static const char ext[] = STRIP_EXTENSION;
@ -388,7 +391,7 @@ static int run_argv(int *argcp, const char ***argv)
/* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */
static void get_debugfs_mntpt(void)
{
const char *path = debugfs_find_mountpoint();
const char *path = debugfs_mount(NULL);

if (path)
strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt));
@ -449,8 +452,8 @@ int main(int argc, const char **argv)
setup_path();

while (1) {
static int done_help = 0;
static int was_alias = 0;
static int done_help;
static int was_alias;

was_alias = run_argv(&argc, &argv);
if (errno != ENOENT)

@ -31,13 +31,14 @@
#include "EXTERN.h"
#include "perl.h"
#include "XSUB.h"
#include "../../../util/trace-event-perl.h"
#include "../../../perf.h"
#include "../../../util/trace-event.h"

#ifndef PERL_UNUSED_VAR
# define PERL_UNUSED_VAR(var) if (0) var = var
#endif

#line 41 "Context.c"
#line 42 "Context.c"

XS(XS_Perf__Trace__Context_common_pc); /* prototype to pass -Wmissing-prototypes */
XS(XS_Perf__Trace__Context_common_pc)

@ -22,7 +22,8 @@
#include "EXTERN.h"
#include "perl.h"
#include "XSUB.h"
#include "../../../util/trace-event-perl.h"
#include "../../../perf.h"
#include "../../../util/trace-event.h"

MODULE = Perf::Trace::Context PACKAGE = Perf::Trace::Context
PROTOTYPES: ENABLE

@ -44,7 +44,7 @@ sub nsecs_secs {
sub nsecs_nsecs {
my ($nsecs) = @_;

return $nsecs - nsecs_secs($nsecs);
return $nsecs % $NSECS_PER_SEC;
}

sub nsecs_str {

@ -1,7 +1,2 @@
#!/bin/bash
perf record -c 1 -f -a -M -R -e kmem:kmalloc -e irq:softirq_entry





perf record -c 1 -f -a -M -R -e kmem:kmalloc -e irq:softirq_entry -e kmem:kfree

@ -1,6 +0,0 @@
#!/bin/bash
# description: useless but exhaustive test script
perf trace -s ~/libexec/perf-core/scripts/perl/check-perf-trace.pl


tools/perf/scripts/perl/bin/failed-syscalls-record (new file)
@ -0,0 +1,2 @@
#!/bin/bash
perf record -c 1 -f -a -M -R -e raw_syscalls:sys_exit

tools/perf/scripts/perl/bin/failed-syscalls-report (new file)
@ -0,0 +1,4 @@
#!/bin/bash
# description: system-wide failed syscalls
# args: [comm]
perf trace -s ~/libexec/perf-core/scripts/perl/failed-syscalls.pl $1

tools/perf/scripts/perl/failed-syscalls.pl (new file)
@ -0,0 +1,38 @@
# failed system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.

use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
use lib "./Perf-Trace-Util/lib";
use Perf::Trace::Core;
use Perf::Trace::Context;
use Perf::Trace::Util;

my %failed_syscalls;

sub raw_syscalls::sys_exit
{
my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
$common_pid, $common_comm,
$id, $ret) = @_;

if ($ret < 0) {
$failed_syscalls{$common_comm}++;
}
}

sub trace_end
{
printf("\nfailed syscalls by comm:\n\n");

printf("%-20s %10s\n", "comm", "# errors");
printf("%-20s %6s %10s\n", "--------------------", "----------");
|
||||

foreach my $comm (sort {$failed_syscalls{$b} <=> $failed_syscalls{$a}}
keys %failed_syscalls) {
printf("%-20s %10s\n", $comm, $failed_syscalls{$comm});
}
}

tools/perf/scripts/python/Perf-Trace-Util/Context.c (new file)
@ -0,0 +1,88 @@
/*
 * Context.c. Python interfaces for perf trace.
 *
 * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <Python.h>
#include "../../../perf.h"
#include "../../../util/trace-event.h"

PyMODINIT_FUNC initperf_trace_context(void);

static PyObject *perf_trace_context_common_pc(PyObject *self, PyObject *args)
{
static struct scripting_context *scripting_context;
PyObject *context;
int retval;

if (!PyArg_ParseTuple(args, "O", &context))
return NULL;

scripting_context = PyCObject_AsVoidPtr(context);
retval = common_pc(scripting_context);

return Py_BuildValue("i", retval);
}

static PyObject *perf_trace_context_common_flags(PyObject *self,
PyObject *args)
{
static struct scripting_context *scripting_context;
PyObject *context;
int retval;

if (!PyArg_ParseTuple(args, "O", &context))
return NULL;

scripting_context = PyCObject_AsVoidPtr(context);
retval = common_flags(scripting_context);

return Py_BuildValue("i", retval);
}

static PyObject *perf_trace_context_common_lock_depth(PyObject *self,
PyObject *args)
{
static struct scripting_context *scripting_context;
PyObject *context;
int retval;

if (!PyArg_ParseTuple(args, "O", &context))
return NULL;

scripting_context = PyCObject_AsVoidPtr(context);
retval = common_lock_depth(scripting_context);

return Py_BuildValue("i", retval);
}

static PyMethodDef ContextMethods[] = {
{ "common_pc", perf_trace_context_common_pc, METH_VARARGS,
"Get the common preempt count event field value."},
{ "common_flags", perf_trace_context_common_flags, METH_VARARGS,
"Get the common flags event field value."},
{ "common_lock_depth", perf_trace_context_common_lock_depth,
METH_VARARGS, "Get the common lock depth event field value."},
{ NULL, NULL, 0, NULL}
};

PyMODINIT_FUNC initperf_trace_context(void)
{
(void) Py_InitModule("perf_trace_context", ContextMethods);
}

@ -0,0 +1,91 @@
# Core.py - Python extension for perf trace, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

from collections import defaultdict

def autodict():
    return defaultdict(autodict)

flag_fields = autodict()
symbolic_fields = autodict()

def define_flag_field(event_name, field_name, delim):
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    symbolic_fields[event_name][field_name]['values'][value] = field_str

def flag_str(event_name, field_name, value):
    string = ""

    if flag_fields[event_name][field_name]:
        print_delim = 0
        keys = flag_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx

    return string

def symbol_str(event_name, field_name, value):
    string = ""

    if symbolic_fields[event_name][field_name]:
        keys = symbolic_fields[event_name][field_name]['values'].keys()
        keys.sort()
        for idx in keys:
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break

    return string

trace_flags = { 0x00: "NONE", \
    0x01: "IRQS_OFF", \
    0x02: "IRQS_NOSUPPORT", \
    0x04: "NEED_RESCHED", \
    0x08: "HARDIRQ", \
    0x10: "SOFTIRQ" }

def trace_flag_str(value):
    string = ""
    print_delim = 0

    keys = trace_flags.keys()

    for idx in keys:
        if not value and not idx:
            string += "NONE"
            break

        if idx and (value & idx) == idx:
            if print_delim:
                string += " | ";
            string += trace_flags[idx]
            print_delim = 1
            value &= ~idx

    return string

@ -0,0 +1,25 @@
# Util.py - Python extension for perf trace, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

NSECS_PER_SEC = 1000000000

def avg(total, n):
    return total / n

def nsecs(secs, nsecs):
    return secs * NSECS_PER_SEC + nsecs

def nsecs_secs(nsecs):
    return nsecs / NSECS_PER_SEC

def nsecs_nsecs(nsecs):
    return nsecs % NSECS_PER_SEC

def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
|
||||
return str
|
@ -0,0 +1,2 @@
#!/bin/bash
perf record -c 1 -f -a -M -R -e raw_syscalls:sys_exit

@ -0,0 +1,4 @@
#!/bin/bash
# description: system-wide failed syscalls, by pid
# args: [comm]
perf trace -s ~/libexec/perf-core/scripts/python/failed-syscalls-by-pid.py $1

@ -0,0 +1,2 @@
#!/bin/bash
perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter

@ -0,0 +1,4 @@
#!/bin/bash
# description: system-wide syscall counts, by pid
# args: [comm]
perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts-by-pid.py $1

tools/perf/scripts/python/bin/syscall-counts-record (new file)
@ -0,0 +1,2 @@
#!/bin/bash
perf record -c 1 -f -a -M -R -e raw_syscalls:sys_enter

tools/perf/scripts/python/bin/syscall-counts-report (new file)
@ -0,0 +1,4 @@
#!/bin/bash
# description: system-wide syscall counts
# args: [comm]
perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts.py $1

tools/perf/scripts/python/check-perf-trace.py (new file)
@ -0,0 +1,83 @@
# perf trace event handlers, generated by perf trace -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from Core import *
from perf_trace_context import *

unhandled = autodict()

def trace_begin():
    print "trace_begin"
    pass

def trace_end():
    print_unhandled()

def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        vec):
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),

def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),

def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1

def print_header(event_name, cpu, secs, nsecs, pid, comm):
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),

# print trace fields not included in handler args
def print_uncommon(context):
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))

def print_unhandled():
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])

tools/perf/scripts/python/failed-syscalls-by-pid.py (new file)
@ -0,0 +1,68 @@
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *

usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
|
||||

for_comm = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    for_comm = sys.argv[1]

syscalls = autodict()

def trace_begin():
    pass

def trace_end():
    print_error_totals()

def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
    if for_comm is not None:
        if common_comm != for_comm:
            return

    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            syscalls[common_comm][common_pid][id][ret] = 1

def print_error_totals():
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s %10s\n" % ("comm [pid]", "count"),
    print "%-30s %10s\n" % ("------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print "  syscall: %-16d\n" % (id),
                ret_keys = syscalls[comm][pid][id].keys()
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
                    print "    err = %-20d %10d\n" % (ret, val),

tools/perf/scripts/python/syscall-counts-by-pid.py (new file)
@ -0,0 +1,64 @@
# system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *

usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";

for_comm = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    for_comm = sys.argv[1]

syscalls = autodict()

def trace_begin():
    pass

def trace_end():
    print_syscall_totals()

def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        syscalls[common_comm][common_pid][id] = 1

def print_syscall_totals():
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",

    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                key = lambda(k, v): (v, k), reverse = True):
                print "  %-38d %10d\n" % (id, val),

tools/perf/scripts/python/syscall-counts.py (new file)
@ -0,0 +1,58 @@
# system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *

usage = "perf trace -s syscall-counts.py [comm]\n";

for_comm = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    for_comm = sys.argv[1]

syscalls = autodict()

def trace_begin():
    pass

def trace_end():
    print_syscall_totals()

def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1

def print_syscall_totals():
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
        reverse = True):
        print "%-40d %10d\n" % (id, val),

tools/perf/util/build-id.c (new file)
@ -0,0 +1,39 @@
/*
 * build-id.c
 *
 * build-id support
 *
 * Copyright (C) 2009, 2010 Red Hat Inc.
 * Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
 */
#include "build-id.h"
#include "event.h"
#include "symbol.h"
#include <linux/kernel.h>

static int build_id__mark_dso_hit(event_t *event, struct perf_session *session)
{
struct addr_location al;
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
struct thread *thread = perf_session__findnew(session, event->ip.pid);

if (thread == NULL) {
pr_err("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}

thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
event->ip.ip, &al);

if (al.map != NULL)
al.map->dso->hit = 1;

return 0;
}

struct perf_event_ops build_id__mark_dso_hit_ops = {
.sample = build_id__mark_dso_hit,
.mmap = event__process_mmap,
.fork = event__process_task,
};

tools/perf/util/build-id.h (new file)
@ -0,0 +1,8 @@
#ifndef PERF_BUILD_ID_H_
#define PERF_BUILD_ID_H_ 1

#include "session.h"

extern struct perf_event_ops build_id__mark_dso_hit_ops;

#endif

@ -1,252 +0,0 @@
#include "symbol.h"
#include "util.h"
#include "debug.h"
#include "thread.h"
#include "session.h"

static int process_event_stub(event_t *event __used,
struct perf_session *session __used)
{
dump_printf(": unhandled!\n");
return 0;
}

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
if (!handler->process_sample_event)
handler->process_sample_event = process_event_stub;
if (!handler->process_mmap_event)
handler->process_mmap_event = process_event_stub;
if (!handler->process_comm_event)
handler->process_comm_event = process_event_stub;
if (!handler->process_fork_event)
handler->process_fork_event = process_event_stub;
if (!handler->process_exit_event)
handler->process_exit_event = process_event_stub;
if (!handler->process_lost_event)
handler->process_lost_event = process_event_stub;
if (!handler->process_read_event)
handler->process_read_event = process_event_stub;
if (!handler->process_throttle_event)
handler->process_throttle_event = process_event_stub;
if (!handler->process_unthrottle_event)
handler->process_unthrottle_event = process_event_stub;
}

static const char *event__name[] = {
[0] = "TOTAL",
[PERF_RECORD_MMAP] = "MMAP",
[PERF_RECORD_LOST] = "LOST",
[PERF_RECORD_COMM] = "COMM",
[PERF_RECORD_EXIT] = "EXIT",
[PERF_RECORD_THROTTLE] = "THROTTLE",
[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
[PERF_RECORD_FORK] = "FORK",
[PERF_RECORD_READ] = "READ",
[PERF_RECORD_SAMPLE] = "SAMPLE",
};

unsigned long event__total[PERF_RECORD_MAX];

void event__print_totals(void)
{
int i;
for (i = 0; i < PERF_RECORD_MAX; ++i)
pr_info("%10s events: %10ld\n",
event__name[i], event__total[i]);
}

static int process_event(event_t *event, struct perf_session *session,
struct perf_event_ops *ops,
unsigned long offset, unsigned long head)
{
trace_event(event);

if (event->header.type < PERF_RECORD_MAX) {
dump_printf("%p [%p]: PERF_RECORD_%s",
(void *)(offset + head),
(void *)(long)(event->header.size),
event__name[event->header.type]);
++event__total[0];
++event__total[event->header.type];
}

switch (event->header.type) {
case PERF_RECORD_SAMPLE:
return ops->process_sample_event(event, session);
case PERF_RECORD_MMAP:
return ops->process_mmap_event(event, session);
case PERF_RECORD_COMM:
return ops->process_comm_event(event, session);
case PERF_RECORD_FORK:
return ops->process_fork_event(event, session);
case PERF_RECORD_EXIT:
return ops->process_exit_event(event, session);
case PERF_RECORD_LOST:
return ops->process_lost_event(event, session);
case PERF_RECORD_READ:
return ops->process_read_event(event, session);
case PERF_RECORD_THROTTLE:
return ops->process_throttle_event(event, session);
case PERF_RECORD_UNTHROTTLE:
return ops->process_unthrottle_event(event, session);
default:
ops->total_unknown++;
return -1;
}
}

int perf_header__read_build_ids(int input, u64 offset, u64 size)
{
struct build_id_event bev;
char filename[PATH_MAX];
u64 limit = offset + size;
int err = -1;

while (offset < limit) {
struct dso *dso;
ssize_t len;

if (read(input, &bev, sizeof(bev)) != sizeof(bev))
goto out;

len = bev.header.size - sizeof(bev);
if (read(input, filename, len) != len)
goto out;

dso = dsos__findnew(filename);
if (dso != NULL)
dso__set_build_id(dso, &bev.build_id);

offset += bev.header.size;
}
err = 0;
out:
return err;
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
struct thread *thread = perf_session__findnew(self, 0);

if (!thread || thread__set_comm(thread, "swapper")) {
pr_err("problem inserting idle task.\n");
thread = NULL;
}

return thread;
}

int perf_session__process_events(struct perf_session *self,
struct perf_event_ops *ops)
{
int err;
unsigned long head, shift;
unsigned long offset = 0;
size_t page_size;
event_t *event;
uint32_t size;
char *buf;

if (perf_session__register_idle_thread(self) == NULL)
return -ENOMEM;

perf_event_ops__fill_defaults(ops);

page_size = getpagesize();

head = self->header.data_offset;
self->sample_type = perf_header__sample_type(&self->header);

err = -EINVAL;
if (ops->sample_type_check && ops->sample_type_check(self) < 0)
goto out_err;

if (!ops->full_paths) {
char bf[PATH_MAX];

if (getcwd(bf, sizeof(bf)) == NULL) {
err = -errno;
out_getcwd_err:
pr_err("failed to get the current directory\n");
goto out_err;
}
self->cwd = strdup(bf);
if (self->cwd == NULL) {
err = -ENOMEM;
goto out_getcwd_err;
}
self->cwdlen = strlen(self->cwd);
}

shift = page_size * (head / page_size);
offset += shift;
head -= shift;

remap:
buf = mmap(NULL, page_size * self->mmap_window, PROT_READ,
MAP_SHARED, self->fd, offset);
if (buf == MAP_FAILED) {
pr_err("failed to mmap file\n");
err = -errno;
goto out_err;
}

more:
event = (event_t *)(buf + head);

size = event->header.size;
if (!size)
size = 8;

if (head + event->header.size >= page_size * self->mmap_window) {
int munmap_ret;

shift = page_size * (head / page_size);

munmap_ret = munmap(buf, page_size * self->mmap_window);
assert(munmap_ret == 0);

offset += shift;
head -= shift;
goto remap;
}

size = event->header.size;

dump_printf("\n%p [%p]: event: %d\n",
(void *)(offset + head),
(void *)(long)event->header.size,
event->header.type);

if (!size || process_event(event, self, ops, offset, head) < 0) {

dump_printf("%p [%p]: skipping unknown header type: %d\n",
(void *)(offset + head),
(void *)(long)(event->header.size),
event->header.type);

/*
 * assume we lost track of the stream, check alignment, and
 * increment a single u64 in the hope to catch on again 'soon'.
 */

if (unlikely(head & 7))
head &= ~7ULL;

size = 8;
}

head += size;

if (offset + head >= self->header.data_offset + self->header.data_size)
goto done;

if (offset + head < self->size)
goto more;

done:
err = 0;
out_err:
return err;
}

@ -9,6 +9,7 @@
#include "color.h"
#include "event.h"
#include "debug.h"
#include "util.h"

int verbose = 0;
int dump_trace = 0;

@ -106,16 +106,14 @@ int debugfs_valid_entry(const char *path)
return 0;
}

/* mount the debugfs somewhere */
/* mount the debugfs somewhere if it's not mounted */

int debugfs_mount(const char *mountpoint)
char *debugfs_mount(const char *mountpoint)
{
char mountcmd[128];

/* see if it's already mounted */
if (debugfs_find_mountpoint()) {
debugfs_premounted = 1;
return 0;
return debugfs_mountpoint;
}

/* if not mounted and no argument */
@ -127,13 +125,14 @@ int debugfs_mount(const char *mountpoint)
mountpoint = "/sys/kernel/debug";
}

if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0)
return NULL;

/* save the mountpoint */
strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
debugfs_found = 1;

/* mount it */
snprintf(mountcmd, sizeof(mountcmd),
"/bin/mount -t debugfs debugfs %s", mountpoint);
return system(mountcmd);
return debugfs_mountpoint;
}

/* umount the debugfs */

@ -15,7 +15,7 @@
extern const char *debugfs_find_mountpoint(void);
extern int debugfs_valid_mountpoint(const char *debugfs);
extern int debugfs_valid_entry(const char *path);
extern int debugfs_mount(const char *mountpoint);
extern char *debugfs_mount(const char *mountpoint);
extern int debugfs_umount(void);
extern int debugfs_write(const char *entry, const char *value);
extern int debugfs_read(const char *entry, char *buffer, size_t size);