Diffstat (limited to 'tools/perf/builtin-stat.c')
-rw-r--r--  tools/perf/builtin-stat.c  123
1 file changed, 51 insertions(+), 72 deletions(-)
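
Not part of the patch: a minimal standalone sketch of the control flow the diff below introduces, where COUNTER_FATAL is dropped, the old "goto try_again" becomes an open-retry loop, and a failing event is either retried (e.g. after a fallback) or skipped, with the "nothing usable" error deferred until after all opens. The names try_open() and handle_error() are hypothetical stand-ins for create_perf_stat_counter() and stat_handle_error(), not perf APIs.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the reduced enum in the patch: no COUNTER_FATAL, only skip or retry. */
enum counter_recovery { COUNTER_SKIP, COUNTER_RETRY };

/* Illustrative stand-in for create_perf_stat_counter(): fails once, then succeeds. */
static int try_open(int *attempts)
{
	if ((*attempts)++ == 0) {
		errno = EACCES;
		return -1;
	}
	return 0;
}

/* Illustrative stand-in for stat_handle_error(): retry once as if a fallback
 * (e.g. dropping kernel sampling) was applied, otherwise skip the counter. */
static enum counter_recovery handle_error(int err)
{
	return err == EACCES ? COUNTER_RETRY : COUNTER_SKIP;
}

int main(void)
{
	int attempts = 0, open_err = 0;

	/* The open-retry loop that replaces the old "goto try_again" pattern. */
	while (true) {
		if (try_open(&attempts) == 0)
			break;
		open_err = errno;
		if (handle_error(open_err) != COUNTER_RETRY)
			break;	/* COUNTER_SKIP: give up on this counter. */
	}
	printf("opened after %d attempt(s), last errno=%d\n", attempts, open_err);
	return 0;
}
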
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 75b9979c6c05..7006f848f87a 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -610,22 +610,13 @@ static int dispatch_events(bool forks, int timeout, int interval, int *times)
enum counter_recovery {
COUNTER_SKIP,
COUNTER_RETRY,
- COUNTER_FATAL,
};
static enum counter_recovery stat_handle_error(struct evsel *counter, int err)
{
char msg[BUFSIZ];
- if (counter->skippable) {
- if (verbose > 0) {
- ui__warning("skipping event %s that kernel failed to open .\n",
- evsel__name(counter));
- }
- counter->supported = false;
- counter->errored = true;
- return COUNTER_SKIP;
- }
+ assert(!counter->supported);
/*
* PPC returns ENXIO for HW counters until 2.6.37
@@ -636,19 +627,16 @@ static enum counter_recovery stat_handle_error(struct evsel *counter, int err)
ui__warning("%s event is not supported by the kernel.\n",
evsel__name(counter));
}
- counter->supported = false;
- /*
- * errored is a sticky flag that means one of the counter's
- * cpu event had a problem and needs to be reexamined.
- */
- counter->errored = true;
- } else if (evsel__fallback(counter, &target, err, msg, sizeof(msg))) {
+ return COUNTER_SKIP;
+ }
+ if (evsel__fallback(counter, &target, err, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
+ counter->supported = true;
return COUNTER_RETRY;
- } else if (target__has_per_thread(&target) && err != EOPNOTSUPP &&
- evsel_list->core.threads &&
- evsel_list->core.threads->err_thread != -1) {
+ }
+ if (target__has_per_thread(&target) && err != EOPNOTSUPP &&
+ evsel_list->core.threads && evsel_list->core.threads->err_thread != -1) {
/*
* For global --per-thread case, skip current
* error thread.
@@ -656,24 +644,17 @@ static enum counter_recovery stat_handle_error(struct evsel *counter, int err)
if (!thread_map__remove(evsel_list->core.threads,
evsel_list->core.threads->err_thread)) {
evsel_list->core.threads->err_thread = -1;
+ counter->supported = true;
return COUNTER_RETRY;
}
- } else if (err == EOPNOTSUPP) {
- if (verbose > 0) {
- ui__warning("%s event is not supported by the kernel.\n",
- evsel__name(counter));
- }
- counter->supported = false;
- counter->errored = true;
}
-
- evsel__open_strerror(counter, &target, err, msg, sizeof(msg));
- ui__error("%s\n", msg);
-
- if (child_pid != -1)
- kill(child_pid, SIGTERM);
-
- return COUNTER_FATAL;
+ if (verbose > 0) {
+ ui__warning(err == EOPNOTSUPP
+ ? "%s event is not supported by the kernel.\n"
+ : "skipping event %s that kernel failed to open.\n",
+ evsel__name(counter));
+ }
+ return COUNTER_SKIP;
}
static int create_perf_stat_counter(struct evsel *evsel,
@@ -746,8 +727,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
struct evlist_cpu_iterator evlist_cpu_itr;
struct affinity saved_affinity, *affinity = NULL;
- int err;
- bool second_pass = false;
+ int err, open_err = 0;
+ bool second_pass = false, has_supported_counters;
if (forks) {
if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
@@ -787,14 +768,17 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
if (target.use_bpf)
break;
- if (counter->reset_group || counter->errored)
+ if (counter->reset_group || !counter->supported)
continue;
if (evsel__is_bperf(counter))
continue;
-try_again:
- if (create_perf_stat_counter(counter, &stat_config,
- evlist_cpu_itr.cpu_map_idx) < 0) {
+ while (true) {
+ if (create_perf_stat_counter(counter, &stat_config,
+ evlist_cpu_itr.cpu_map_idx) == 0)
+ break;
+
+ open_err = errno;
/*
* Weak group failed. We cannot just undo this here
* because earlier CPUs might be in group mode, and the kernel
@@ -802,29 +786,19 @@ try_again:
* it to later.
* Don't close here because we're in the wrong affinity.
*/
- if ((errno == EINVAL || errno == EBADF) &&
+ if ((open_err == EINVAL || open_err == EBADF) &&
evsel__leader(counter) != counter &&
counter->weak_group) {
evlist__reset_weak_group(evsel_list, counter, false);
assert(counter->reset_group);
+ counter->supported = true;
second_pass = true;
- continue;
- }
-
- switch (stat_handle_error(counter, errno)) {
- case COUNTER_FATAL:
- err = -1;
- goto err_out;
- case COUNTER_RETRY:
- goto try_again;
- case COUNTER_SKIP:
- continue;
- default:
break;
}
+ if (stat_handle_error(counter, open_err) != COUNTER_RETRY)
+ break;
}
- counter->supported = true;
}
if (second_pass) {
@@ -837,7 +811,7 @@ try_again:
evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
counter = evlist_cpu_itr.evsel;
- if (!counter->reset_group && !counter->errored)
+ if (!counter->reset_group && counter->supported)
continue;
perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
@@ -848,34 +822,29 @@ try_again:
if (!counter->reset_group)
continue;
-try_again_reset:
- pr_debug2("reopening weak %s\n", evsel__name(counter));
- if (create_perf_stat_counter(counter, &stat_config,
- evlist_cpu_itr.cpu_map_idx) < 0) {
-
- switch (stat_handle_error(counter, errno)) {
- case COUNTER_FATAL:
- err = -1;
- goto err_out;
- case COUNTER_RETRY:
- goto try_again_reset;
- case COUNTER_SKIP:
- continue;
- default:
+
+ while (true) {
+ pr_debug2("reopening weak %s\n", evsel__name(counter));
+ if (create_perf_stat_counter(counter, &stat_config,
+ evlist_cpu_itr.cpu_map_idx) == 0)
+ break;
+
+ open_err = errno;
+ if (stat_handle_error(counter, open_err) != COUNTER_RETRY)
break;
- }
}
- counter->supported = true;
}
}
affinity__cleanup(affinity);
affinity = NULL;
+ has_supported_counters = false;
evlist__for_each_entry(evsel_list, counter) {
if (!counter->supported) {
perf_evsel__free_fd(&counter->core);
continue;
}
+ has_supported_counters = true;
l = strlen(counter->unit);
if (l > stat_config.unit_width)
@@ -887,6 +856,16 @@ try_again_reset:
goto err_out;
}
}
+ if (!has_supported_counters) {
+ evsel__open_strerror(evlist__first(evsel_list), &target, open_err,
+ msg, sizeof(msg));
+ ui__error("No supported events found.\n%s\n", msg);
+
+ if (child_pid != -1)
+ kill(child_pid, SIGTERM);
+ err = -1;
+ goto err_out;
+ }
if (evlist__apply_filters(evsel_list, &counter, &target)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",