perf/core: Implement the 'perf_uprobe' PMU
This patch adds perf_uprobe support, following the same pattern as the previous patch (for kprobe). Two functions, create_local_trace_uprobe() and destroy_local_trace_uprobe(), are added so that a uprobe can be created and attached to the file descriptor returned by perf_event_open().

Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Yonghong Song <yhs@fb.com>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Cc: <daniel@iogearbox.net>
Cc: <davem@davemloft.net>
Cc: <kernel-team@fb.com>
Cc: <rostedt@goodmis.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20171206224518.3598254-7-songliubraving@fb.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e12f03d703
commit 33ea4b2427
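For orientation (not part of the commit itself), below is a minimal userspace sketch of how the new PMU is meant to be driven. It assumes the perf_event_attr fields uprobe_path and probe_offset introduced by this series (aliases of config1/config2), that the dynamically allocated PMU type is read from /sys/bus/event_source/devices/uprobe/type, and that the retprobe flag is bit 0 of attr.config per the PMU_FORMAT_ATTR() referenced in the diff; the target binary and file offset are placeholders.

/* Hedged sketch: open a uprobe through the new "uprobe" PMU. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
                           int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr = {};
        FILE *f;
        int type, fd;

        /* Dynamic PMUs publish their type id in sysfs once registered. */
        f = fopen("/sys/bus/event_source/devices/uprobe/type", "r");
        if (!f || fscanf(f, "%d", &type) != 1)
                return 1;
        fclose(f);

        attr.size = sizeof(attr);
        attr.type = type;
        /* uprobe_path/probe_offset alias config1/config2 in perf_event_attr. */
        attr.uprobe_path = (uint64_t)(uintptr_t)"/bin/bash"; /* placeholder binary */
        attr.probe_offset = 0x4245c0;                        /* placeholder file offset */
        attr.config = 0;     /* assumed: setting bit 0 would request a uretprobe */

        /* pid = -1, cpu = 0: count hits from any task on CPU 0 (needs sufficient
         * privileges); a specific pid with cpu = -1 scopes it to one process. */
        fd = perf_event_open(&attr, -1, 0, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

        /* ... run the workload, then read the hit count ... */
        uint64_t count = 0;
        read(fd, &count, sizeof(count));
        printf("probe hits: %llu\n", (unsigned long long)count);

        close(fd);      /* tears the probe down via perf_uprobe_destroy() */
        return 0;
}

Creating the trace_uprobe directly from the perf file descriptor avoids a detour through the tracefs uprobe_events interface, which is what the local create/destroy helpers in the diff below provide.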
include/linux/trace_events.h
@@ -537,6 +537,10 @@ extern void perf_trace_del(struct perf_event *event, int flags);
 extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe);
 extern void perf_kprobe_destroy(struct perf_event *event);
 #endif
+#ifdef CONFIG_UPROBE_EVENTS
+extern int perf_uprobe_init(struct perf_event *event, bool is_retprobe);
+extern void perf_uprobe_destroy(struct perf_event *event);
+#endif
 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                      char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
kernel/events/core.c
@@ -7992,7 +7992,7 @@ static struct pmu perf_tracepoint = {
        .read           = perf_swevent_read,
 };

-#ifdef CONFIG_KPROBE_EVENTS
+#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
 /*
  * Flags in config, used by dynamic PMU kprobe and uprobe
  * The flags should match following PMU_FORMAT_ATTR().
@@ -8020,7 +8020,9 @@ static const struct attribute_group *probe_attr_groups[] = {
        &probe_format_group,
        NULL,
 };
+#endif

+#ifdef CONFIG_KPROBE_EVENTS
 static int perf_kprobe_event_init(struct perf_event *event);
 static struct pmu perf_kprobe = {
        .task_ctx_nr    = perf_sw_context,
@@ -8057,12 +8059,52 @@ static int perf_kprobe_event_init(struct perf_event *event)
 }
 #endif /* CONFIG_KPROBE_EVENTS */

+#ifdef CONFIG_UPROBE_EVENTS
+static int perf_uprobe_event_init(struct perf_event *event);
+static struct pmu perf_uprobe = {
+       .task_ctx_nr    = perf_sw_context,
+       .event_init     = perf_uprobe_event_init,
+       .add            = perf_trace_add,
+       .del            = perf_trace_del,
+       .start          = perf_swevent_start,
+       .stop           = perf_swevent_stop,
+       .read           = perf_swevent_read,
+       .attr_groups    = probe_attr_groups,
+};
+
+static int perf_uprobe_event_init(struct perf_event *event)
+{
+       int err;
+       bool is_retprobe;
+
+       if (event->attr.type != perf_uprobe.type)
+               return -ENOENT;
+       /*
+        * no branch sampling for probe events
+        */
+       if (has_branch_stack(event))
+               return -EOPNOTSUPP;
+
+       is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
+       err = perf_uprobe_init(event, is_retprobe);
+       if (err)
+               return err;
+
+       event->destroy = perf_uprobe_destroy;
+
+       return 0;
+}
+#endif /* CONFIG_UPROBE_EVENTS */
+
 static inline void perf_tp_register(void)
 {
        perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
 #ifdef CONFIG_KPROBE_EVENTS
        perf_pmu_register(&perf_kprobe, "kprobe", -1);
 #endif
+#ifdef CONFIG_UPROBE_EVENTS
+       perf_pmu_register(&perf_uprobe, "uprobe", -1);
+#endif
 }

 static void perf_event_free_filter(struct perf_event *event)
@@ -8150,6 +8192,10 @@ static inline bool perf_event_is_tracing(struct perf_event *event)
 #ifdef CONFIG_KPROBE_EVENTS
        if (event->pmu == &perf_kprobe)
                return true;
+#endif
+#ifdef CONFIG_UPROBE_EVENTS
+       if (event->pmu == &perf_uprobe)
+               return true;
 #endif
        return false;
 }
kernel/trace/trace_event_perf.c
@@ -286,6 +286,59 @@ void perf_kprobe_destroy(struct perf_event *p_event)
 }
 #endif /* CONFIG_KPROBE_EVENTS */

+#ifdef CONFIG_UPROBE_EVENTS
+int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe)
+{
+       int ret;
+       char *path = NULL;
+       struct trace_event_call *tp_event;
+
+       if (!p_event->attr.uprobe_path)
+               return -EINVAL;
+       path = kzalloc(PATH_MAX, GFP_KERNEL);
+       if (!path)
+               return -ENOMEM;
+       ret = strncpy_from_user(
+               path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
+       if (ret < 0)
+               goto out;
+       if (path[0] == '\0') {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       tp_event = create_local_trace_uprobe(
+               path, p_event->attr.probe_offset, is_retprobe);
+       if (IS_ERR(tp_event)) {
+               ret = PTR_ERR(tp_event);
+               goto out;
+       }
+
+       /*
+        * local trace_uprobe need to hold event_mutex to call
+        * uprobe_buffer_enable() and uprobe_buffer_disable().
+        * event_mutex is not required for local trace_kprobes.
+        */
+       mutex_lock(&event_mutex);
+       ret = perf_trace_event_init(tp_event, p_event);
+       if (ret)
+               destroy_local_trace_uprobe(tp_event);
+       mutex_unlock(&event_mutex);
+out:
+       kfree(path);
+       return ret;
+}
+
+void perf_uprobe_destroy(struct perf_event *p_event)
+{
+       mutex_lock(&event_mutex);
+       perf_trace_event_close(p_event);
+       perf_trace_event_unreg(p_event);
+       mutex_unlock(&event_mutex);
+       destroy_local_trace_uprobe(p_event->tp_event);
+}
+#endif /* CONFIG_UPROBE_EVENTS */
+
 int perf_trace_add(struct perf_event *p_event, int flags)
 {
        struct trace_event_call *tp_event = p_event->tp_event;
kernel/trace/trace_probe.h
@@ -410,4 +410,8 @@ extern struct trace_event_call *
 create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
                          bool is_return);
 extern void destroy_local_trace_kprobe(struct trace_event_call *event_call);
+
+extern struct trace_event_call *
+create_local_trace_uprobe(char *name, unsigned long offs, bool is_return);
+extern void destroy_local_trace_uprobe(struct trace_event_call *event_call);
 #endif
kernel/trace/trace_uprobe.c
@@ -1292,16 +1292,25 @@ static struct trace_event_functions uprobe_funcs = {
        .trace          = print_uprobe_event
 };

-static int register_uprobe_event(struct trace_uprobe *tu)
+static inline void init_trace_event_call(struct trace_uprobe *tu,
+                                        struct trace_event_call *call)
 {
-       struct trace_event_call *call = &tu->tp.call;
-       int ret;
-
-       /* Initialize trace_event_call */
        INIT_LIST_HEAD(&call->class->fields);
        call->event.funcs = &uprobe_funcs;
        call->class->define_fields = uprobe_event_define_fields;

+       call->flags = TRACE_EVENT_FL_UPROBE;
+       call->class->reg = trace_uprobe_register;
+       call->data = tu;
+}
+
+static int register_uprobe_event(struct trace_uprobe *tu)
+{
+       struct trace_event_call *call = &tu->tp.call;
+       int ret = 0;
+
+       init_trace_event_call(tu, call);
+
 if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
                return -ENOMEM;

@@ -1311,9 +1320,6 @@ static int register_uprobe_event(struct trace_uprobe *tu)
                return -ENODEV;
        }

-       call->flags = TRACE_EVENT_FL_UPROBE;
-       call->class->reg = trace_uprobe_register;
-       call->data = tu;
        ret = trace_add_event_call(call);

        if (ret) {
@@ -1339,6 +1345,70 @@ static int unregister_uprobe_event(struct trace_uprobe *tu)
        return 0;
 }

+#ifdef CONFIG_PERF_EVENTS
+struct trace_event_call *
+create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
+{
+       struct trace_uprobe *tu;
+       struct inode *inode;
+       struct path path;
+       int ret;
+
+       ret = kern_path(name, LOOKUP_FOLLOW, &path);
+       if (ret)
+               return ERR_PTR(ret);
+
+       inode = igrab(d_inode(path.dentry));
+       path_put(&path);
+
+       if (!inode || !S_ISREG(inode->i_mode)) {
+               iput(inode);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /*
+        * local trace_kprobes are not added to probe_list, so they are never
+        * searched in find_trace_kprobe(). Therefore, there is no concern of
+        * duplicated name "DUMMY_EVENT" here.
+        */
+       tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
+                               is_return);
+
+       if (IS_ERR(tu)) {
+               pr_info("Failed to allocate trace_uprobe.(%d)\n",
+                       (int)PTR_ERR(tu));
+               return ERR_CAST(tu);
+       }
+
+       tu->offset = offs;
+       tu->inode = inode;
+       tu->filename = kstrdup(name, GFP_KERNEL);
+       init_trace_event_call(tu, &tu->tp.call);
+
+       if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       return &tu->tp.call;
+error:
+       free_trace_uprobe(tu);
+       return ERR_PTR(ret);
+}
+
+void destroy_local_trace_uprobe(struct trace_event_call *event_call)
+{
+       struct trace_uprobe *tu;
+
+       tu = container_of(event_call, struct trace_uprobe, tp.call);
+
+       kfree(tu->tp.call.print_fmt);
+       tu->tp.call.print_fmt = NULL;
+
+       free_trace_uprobe(tu);
+}
+#endif /* CONFIG_PERF_EVENTS */
+
 /* Make a trace interface for controling probe points */
 static __init int init_uprobe_trace(void)
 {