From 27a0dcb7adb52473dd98d285a46b764b9219d303 Mon Sep 17 00:00:00 2001
From: Namhyung Kim
Date: Tue, 14 May 2013 11:09:02 +0900
Subject: [PATCH] perf hists: Move locking to its call-sites

It's a preparation patch to eliminate unneeded locking in the perf
report path.

Signed-off-by: Namhyung Kim
Acked-by: Jiri Olsa
Cc: David Ahern
Cc: Ingo Molnar
Cc: Jiri Olsa
Cc: Paul Mackerras
Cc: Peter Zijlstra
Cc: Stephane Eranian
Link: http://lkml.kernel.org/r/1368497347-9628-5-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/builtin-report.c | 26 ++++++++++++++------------
 tools/perf/builtin-top.c    |  3 +++
 tools/perf/util/hist.c      |  6 +-----
 3 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index d45bf9b0361d..63febd24e912 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -297,6 +297,7 @@ static int process_sample_event(struct perf_tool *tool,
 {
 	struct perf_report *rep = container_of(tool, struct perf_report, tool);
 	struct addr_location al;
+	int ret;
 
 	if (perf_event__preprocess_sample(event, machine, &al, sample,
 					  rep->annotate_init) < 0) {
@@ -311,28 +312,29 @@ static int process_sample_event(struct perf_tool *tool,
 	if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
 		return 0;
 
+	pthread_mutex_lock(&evsel->hists.lock);
+
 	if (sort__mode == SORT_MODE__BRANCH) {
-		if (perf_report__add_branch_hist_entry(tool, &al, sample,
-						       evsel, machine)) {
+		ret = perf_report__add_branch_hist_entry(tool, &al, sample,
+							 evsel, machine);
+		if (ret < 0)
 			pr_debug("problem adding lbr entry, skipping event\n");
-			return -1;
-		}
 	} else if (rep->mem_mode == 1) {
-		if (perf_report__add_mem_hist_entry(tool, &al, sample,
-						    evsel, machine, event)) {
+		ret = perf_report__add_mem_hist_entry(tool, &al, sample,
+						      evsel, machine, event);
+		if (ret < 0)
 			pr_debug("problem adding mem entry, skipping event\n");
-			return -1;
-		}
 	} else {
 		if (al.map != NULL)
 			al.map->dso->hit = 1;
 
-		if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) {
+		ret = perf_evsel__add_hist_entry(evsel, &al, sample, machine);
+		if (ret < 0)
 			pr_debug("problem incrementing symbol period, skipping event\n");
-			return -1;
-		}
 	}
-	return 0;
+	pthread_mutex_unlock(&evsel->hists.lock);
+
+	return ret;
 }
 
 static int process_read_event(struct perf_tool *tool,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 5cd41ec43ce1..c2c973476479 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -245,8 +245,11 @@ static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
 {
 	struct hist_entry *he;
 
+	pthread_mutex_lock(&evsel->hists.lock);
 	he = __hists__add_entry(&evsel->hists, al, NULL, sample->period,
 				sample->weight);
+	pthread_mutex_unlock(&evsel->hists.lock);
+
 	if (he == NULL)
 		return NULL;
 
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 7e0fa628e9ab..b11a6cfdb414 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -347,8 +347,6 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
 	struct hist_entry *he;
 	int cmp;
 
-	pthread_mutex_lock(&hists->lock);
-
 	p = &hists->entries_in->rb_node;
 
 	while (*p != NULL) {
@@ -394,14 +392,12 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
 
 	he = hist_entry__new(entry);
 	if (!he)
-		goto out_unlock;
+		return NULL;
 
 	rb_link_node(&he->rb_node_in, parent, p);
 	rb_insert_color(&he->rb_node_in, hists->entries_in);
 out:
 	hist_entry__add_cpumode_period(he, al->cpumode, period);
-out_unlock:
-	pthread_mutex_unlock(&hists->lock);
 	return he;
 }
 
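For context, the pattern applied above is "caller owns the lock": add_hist_entry() in
util/hist.c stops taking hists->lock itself, and the call-sites in builtin-report.c and
builtin-top.c take it around the whole insertion instead, which is what lets a follow-up
change drop the locking entirely on the single-threaded report path. Below is a minimal,
self-contained sketch of that pattern, not perf code; the names (struct hists_like,
add_entry(), process_sample()) are made up for illustration and the counter stands in for
the rbtree of hist entries:

/* gcc -pthread sketch of moving a lock from a helper to its call-sites */
#include <pthread.h>

struct hists_like {
	pthread_mutex_t lock;
	int nr_entries;		/* stand-in for hists->entries_in in perf */
};

/* helper no longer locks; it assumes the caller already holds hists->lock */
static int add_entry(struct hists_like *hists)
{
	hists->nr_entries++;	/* the actual insertion work */
	return 0;
}

/* call-site owns the locking, mirroring process_sample_event()/perf top */
static int process_sample(struct hists_like *hists)
{
	int ret;

	pthread_mutex_lock(&hists->lock);
	ret = add_entry(hists);
	pthread_mutex_unlock(&hists->lock);

	return ret;
}

int main(void)
{
	struct hists_like hists = { .lock = PTHREAD_MUTEX_INITIALIZER };

	return process_sample(&hists);
}

The mutex stays in the structure, but only callers that actually run multi-threaded
(perf top) need to acquire it, so the report path can later skip the lock without
changing the helper again.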