Diffstat (limited to 'tools/perf/builtin-trace.c')
-rw-r--r--  tools/perf/builtin-trace.c  106
1 file changed, 64 insertions(+), 42 deletions(-)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index bdfd3d5128b7..3017291242cf 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -149,7 +149,6 @@ enum summary_mode {
struct trace {
struct perf_tool tool;
- struct syscalltbl *sctbl;
struct {
/** Sorted syscall numbers used by the trace. */
struct syscall *table;
@@ -188,6 +187,14 @@ struct trace {
pid_t *entries;
struct bpf_map *map;
} filter_pids;
+ /*
+ * TODO: The map is from an ID (aka system call number) to struct
+ * syscall_stats. If there is >1 e_machine, such as i386 and x86-64
+ * processes, then the stats here will wrongly aggregate the statistics
+ * for the non-EM_HOST system calls. A fix would be to add the e_machine
+ * into the key, but this would make the code inconsistent with the
+ * per-thread version.
+ */
struct hashmap *syscall_stats;
double duration_filter;
double runtime_ms;
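
One way to realize the fix described in the TODO above would be to fold the e_machine into the stats key; a minimal sketch, assuming a plain 64-bit composite key (not part of this patch):

#include <stdint.h>

/*
 * Hypothetical composite key for the system-wide stats map: ELF machine
 * type in bits 32-47, syscall number in the low 32 bits, so i386 and
 * x86-64 syscalls with the same number would no longer collide.
 */
static inline uint64_t syscall_stats_key(uint16_t e_machine, uint32_t id)
{
	return ((uint64_t)e_machine << 32) | id;
}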
@@ -2141,7 +2148,7 @@ static int syscall__read_info(struct syscall *sc, struct trace *trace)
return 0;
}
- name = syscalltbl__name(trace->sctbl, sc->id);
+ name = syscalltbl__name(sc->e_machine, sc->id);
if (name == NULL) {
sc->nonexistent = true;
return -EEXIST;
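
The lookup above relies on the reworked syscalltbl API, which keys every query by ELF machine type instead of a per-trace table object. The prototypes below are inferred from the call sites in this patch; see tools/perf/util/syscalltbl.h for the authoritative declarations:

const char *syscalltbl__name(int e_machine, int id);
int syscalltbl__id(int e_machine, const char *name);
int syscalltbl__num_idx(int e_machine);
int syscalltbl__id_at_idx(int e_machine, int idx);
int syscalltbl__strglobmatch_first(int e_machine, const char *patt, int *match_next);
int syscalltbl__strglobmatch_next(int e_machine, const char *patt, int *match_next);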
@@ -2241,10 +2248,14 @@ static int trace__validate_ev_qualifier(struct trace *trace)
strlist__for_each_entry(pos, trace->ev_qualifier) {
const char *sc = pos->s;
- int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
+ /*
+ * TODO: More than just this validation and these warnings assume that
+ * all system calls are for the same binary type as perf.
+ */
+ int id = syscalltbl__id(EM_HOST, sc), match_next = -1;
if (id < 0) {
- id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
+ id = syscalltbl__strglobmatch_first(EM_HOST, sc, &match_next);
if (id >= 0)
goto matches;
@@ -2264,7 +2275,7 @@ matches:
continue;
while (1) {
- id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
+ id = syscalltbl__strglobmatch_next(EM_HOST, sc, &match_next);
if (id < 0)
break;
if (nr_allocated == nr_used) {
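
Taken together, the first/next pair in the two hunks above implements glob expansion over the syscall table; a standalone usage sketch, using the prototypes inferred earlier:

#include <stdio.h>

/* Print every syscall whose name matches a glob such as "*sleep*". */
static void for_each_matching_syscall(int e_machine, const char *patt)
{
	int match_next = -1;
	int id = syscalltbl__strglobmatch_first(e_machine, patt, &match_next);

	while (id >= 0) {
		printf("%s -> %d (%s)\n", patt, id, syscalltbl__name(e_machine, id));
		id = syscalltbl__strglobmatch_next(e_machine, patt, &match_next);
	}
}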
@@ -2722,6 +2733,7 @@ static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
int augmented_args_size = 0;
void *augmented_args = NULL;
+ /* TODO: get e_machine from thread. */
struct syscall *sc = trace__syscall_info(trace, evsel, EM_HOST, id);
struct thread_trace *ttrace;
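
The recurring "get e_machine from thread" TODOs suggest a follow-up in which the machine type is derived per thread; a speculative sketch, assuming a hypothetical thread__e_machine() helper that inspects the thread's main executable:

/* Fall back to the perf binary's machine type when the thread's is unknown. */
static struct syscall *trace__syscall_info_for_thread(struct trace *trace,
						      struct evsel *evsel,
						      struct thread *thread,
						      int id)
{
	int e_machine = thread ? thread__e_machine(thread) : EM_HOST; /* assumed helper */

	return trace__syscall_info(trace, evsel, e_machine, id);
}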
@@ -2796,6 +2808,7 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
struct thread_trace *ttrace;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
+ /* TODO: get e_machine from thread. */
struct syscall *sc = trace__syscall_info(trace, evsel, EM_HOST, id);
char msg[1024];
void *args, *augmented_args = NULL;
@@ -2871,6 +2884,7 @@ static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
int alignment = trace->args_alignment;
+ /* TODO: get e_machine from thread. */
struct syscall *sc = trace__syscall_info(trace, evsel, EM_HOST, id);
struct thread_trace *ttrace;
@@ -3224,6 +3238,7 @@ static int trace__event_handler(struct trace *trace, struct evsel *evsel,
if (evsel == trace->syscalls.events.bpf_output) {
int id = perf_evsel__sc_tp_uint(evsel, id, sample);
+ /* TODO: get e_machine from thread. */
struct syscall *sc = trace__syscall_info(trace, evsel, EM_HOST, id);
if (sc) {
@@ -3731,9 +3746,9 @@ out_unaugmented:
return trace->skel->progs.syscall_unaugmented;
}
-static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
+static void trace__init_syscall_bpf_progs(struct trace *trace, int e_machine, int id)
{
- struct syscall *sc = trace__syscall_info(trace, NULL, EM_HOST, id);
+ struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
if (sc == NULL)
return;
@@ -3742,22 +3757,22 @@ static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
}
-static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
+static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int e_machine, int id)
{
- struct syscall *sc = trace__syscall_info(trace, NULL, EM_HOST, id);
+ struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
}
-static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
+static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int e_machine, int id)
{
- struct syscall *sc = trace__syscall_info(trace, NULL, EM_HOST, id);
+ struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
}
-static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int key, unsigned int *beauty_array)
+static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int e_machine, int key, unsigned int *beauty_array)
{
struct tep_format_field *field;
- struct syscall *sc = trace__syscall_info(trace, NULL, EM_HOST, key);
+ struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, key);
const struct btf_type *bt;
char *struct_offset, *tmp, name[32];
bool can_augment = false;
@@ -3854,8 +3869,8 @@ static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace
return NULL;
try_to_find_pair:
- for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
- int id = syscalltbl__id_at_idx(trace->sctbl, i);
+ for (int i = 0, num_idx = syscalltbl__num_idx(sc.e_machine); i < num_idx; ++i) {
+ int id = syscalltbl__id_at_idx(sc.e_machine, i);
/* calling trace__syscall_info() may invalidate '_sc' */
struct syscall *pair = trace__syscall_info(trace, NULL, sc.e_machine, id);
struct bpf_program *pair_prog;
@@ -3941,7 +3956,7 @@ try_to_find_pair:
return NULL;
}
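
The loop rewrite above is the iteration idiom this patch uses wherever a full table walk used to go through trace->sctbl; isolated, it looks like this (a sketch, using the inferred prototypes):

#include <stdio.h>

/* Walk every syscall known for a given ELF machine type. */
static void list_syscalls(int e_machine)
{
	for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) {
		int id = syscalltbl__id_at_idx(e_machine, i);
		const char *name = syscalltbl__name(e_machine, id);

		if (name != NULL)
			printf("%3d %s\n", id, name);
	}
}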
-static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
+static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_machine)
{
int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
@@ -3949,27 +3964,27 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
int err = 0;
unsigned int beauty_array[6];
- for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
- int prog_fd, key = syscalltbl__id_at_idx(trace->sctbl, i);
+ for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) {
+ int prog_fd, key = syscalltbl__id_at_idx(e_machine, i);
if (!trace__syscall_enabled(trace, key))
continue;
- trace__init_syscall_bpf_progs(trace, key);
+ trace__init_syscall_bpf_progs(trace, e_machine, key);
// It'll get at least the "!raw_syscalls:unaugmented"
- prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
+ prog_fd = trace__bpf_prog_sys_enter_fd(trace, e_machine, key);
err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
if (err)
break;
- prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
+ prog_fd = trace__bpf_prog_sys_exit_fd(trace, e_machine, key);
err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
if (err)
break;
/* use beauty_map to tell BPF how many bytes to collect, set beauty_map's value here */
memset(beauty_array, 0, sizeof(beauty_array));
- err = trace__bpf_sys_enter_beauty_map(trace, key, (unsigned int *)beauty_array);
+ err = trace__bpf_sys_enter_beauty_map(trace, e_machine, key, (unsigned int *)beauty_array);
if (err)
continue;
err = bpf_map_update_elem(beauty_map_fd, &key, beauty_array, BPF_ANY);
@@ -4005,9 +4020,9 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
* first and second arg (this one on the raw_syscalls:sys_exit prog
* array tail call, then that one will be used.
*/
- for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
- int key = syscalltbl__id_at_idx(trace->sctbl, i);
- struct syscall *sc = trace__syscall_info(trace, NULL, EM_HOST, key);
+ for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) {
+ int key = syscalltbl__id_at_idx(e_machine, i);
+ struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, key);
struct bpf_program *pair_prog;
int prog_fd;
@@ -4032,7 +4047,7 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
* Get syscall info again as find usable entry above might
* modify the syscall table and shuffle it.
*/
- sc = trace__syscall_info(trace, NULL, EM_HOST, key);
+ sc = trace__syscall_info(trace, NULL, e_machine, key);
sc->bpf_prog.sys_enter = pair_prog;
/*
@@ -4457,8 +4472,13 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
goto out_error_mem;
#ifdef HAVE_BPF_SKEL
- if (trace->skel && trace->skel->progs.sys_enter)
- trace__init_syscalls_bpf_prog_array_maps(trace);
+ if (trace->skel && trace->skel->progs.sys_enter) {
+ /*
+ * TODO: Initialize for all host binary machine types, not just
+ * those matching the perf binary.
+ */
+ trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST);
+ }
#endif
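
Resolving the TODO above would mean one initialization pass per binary machine type present on the system; a speculative sketch of how the call site could grow (EM_386 is the <elf.h> constant for i386 and is not used by this patch):

trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST);
#if defined(__x86_64__)
/* Also cover 32-bit compat processes on x86-64 hosts (hypothetical). */
trace__init_syscalls_bpf_prog_array_maps(trace, EM_386);
#endif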
if (trace->ev_qualifier_ids.nr > 0) {
@@ -4483,7 +4503,8 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
* So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
* not in use.
*/
- trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
+ /* TODO: support more than just the perf binary's machine type for "close". */
+ trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(EM_HOST, "close"));
err = trace__expand_filters(trace, &evsel);
if (err)
@@ -4796,7 +4817,7 @@ static struct syscall_entry *syscall__sort_stats(struct hashmap *syscall_stats)
return entry;
}
-static size_t syscall__dump_stats(struct trace *trace, FILE *fp,
+static size_t syscall__dump_stats(struct trace *trace, int e_machine, FILE *fp,
struct hashmap *syscall_stats)
{
size_t printed = 0;
@@ -4827,7 +4848,7 @@ static size_t syscall__dump_stats(struct trace *trace, FILE *fp,
pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
avg /= NSEC_PER_MSEC;
- sc = trace__syscall_info(trace, /*evsel=*/NULL, EM_HOST, entry->syscall);
+ sc = trace__syscall_info(trace, /*evsel=*/NULL, e_machine, entry->syscall);
if (!sc)
continue;
@@ -4854,14 +4875,14 @@ static size_t syscall__dump_stats(struct trace *trace, FILE *fp,
}
static size_t thread__dump_stats(struct thread_trace *ttrace,
- struct trace *trace, FILE *fp)
+ struct trace *trace, int e_machine, FILE *fp)
{
- return syscall__dump_stats(trace, fp, ttrace->syscall_stats);
+ return syscall__dump_stats(trace, e_machine, fp, ttrace->syscall_stats);
}
-static size_t system__dump_stats(struct trace *trace, FILE *fp)
+static size_t system__dump_stats(struct trace *trace, int e_machine, FILE *fp)
{
- return syscall__dump_stats(trace, fp, trace->syscall_stats);
+ return syscall__dump_stats(trace, e_machine, fp, trace->syscall_stats);
}
static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
@@ -4887,7 +4908,8 @@ static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trac
else if (fputc('\n', fp) != EOF)
++printed;
- printed += thread__dump_stats(ttrace, trace, fp);
+ /* TODO: get e_machine from thread. */
+ printed += thread__dump_stats(ttrace, trace, EM_HOST, fp);
return printed;
}
@@ -4948,7 +4970,8 @@ static size_t trace__fprintf_total_summary(struct trace *trace, FILE *fp)
else if (fputc('\n', fp) != EOF)
++printed;
- printed += system__dump_stats(trace, fp);
+ /* TODO: get all system e_machines. */
+ printed += system__dump_stats(trace, EM_HOST, fp);
return printed;
}
@@ -5140,8 +5163,9 @@ static int trace__parse_events_option(const struct option *opt, const char *str,
*sep = '\0';
list = 0;
- if (syscalltbl__id(trace->sctbl, s) >= 0 ||
- syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
+ /* TODO: support more than just the perf binary's machine type for syscalls. */
+ if (syscalltbl__id(EM_HOST, s) >= 0 ||
+ syscalltbl__strglobmatch_first(EM_HOST, s, &idx) >= 0) {
list = 1;
goto do_concat;
}
@@ -5294,7 +5318,6 @@ static void trace__exit(struct trace *trace)
syscall__exit(&trace->syscalls.table[i]);
zfree(&trace->syscalls.table);
}
- syscalltbl__delete(trace->sctbl);
zfree(&trace->perfconfig_events);
}
@@ -5443,9 +5466,8 @@ int cmd_trace(int argc, const char **argv)
sigaction(SIGCHLD, &sigchld_act, NULL);
trace.evlist = evlist__new();
- trace.sctbl = syscalltbl__new();
- if (trace.evlist == NULL || trace.sctbl == NULL) {
+ if (trace.evlist == NULL) {
pr_err("Not enough memory to run!\n");
err = -ENOMEM;
goto out;
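
The net effect of the allocation change above and the syscalltbl__delete() removal in trace__exit() is that the syscall table no longer has a lifecycle at all; a before/after sketch of a lookup:

/* before: a heap-allocated table object per trace session */
struct syscalltbl *tbl = syscalltbl__new();
int old_id = syscalltbl__id(tbl, "close");
syscalltbl__delete(tbl);

/* after: stateless lookups keyed by ELF machine type */
int new_id = syscalltbl__id(EM_HOST, "close");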