Diffstat (limited to 'tools/perf/builtin-kwork.c')
-rw-r--r--   tools/perf/builtin-kwork.c   1832
1 file changed, 1832 insertions, 0 deletions
diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
new file mode 100644
index 000000000000..fb8c63656ad8
--- /dev/null
+++ b/tools/perf/builtin-kwork.c
@@ -0,0 +1,1832 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * builtin-kwork.c
+ *
+ * Copyright (c) 2022 Huawei Inc, Yang Jihong <yangjihong1@huawei.com>
+ */
+
+#include "builtin.h"
+
+#include "util/data.h"
+#include "util/kwork.h"
+#include "util/debug.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/string2.h"
+#include "util/callchain.h"
+#include "util/evsel_fprintf.h"
+
+#include <subcmd/pager.h>
+#include <subcmd/parse-options.h>
+
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/err.h>
+#include <linux/time64.h>
+#include <linux/zalloc.h>
+
+/*
+ * report header elements width
+ */
+#define PRINT_CPU_WIDTH 4
+#define PRINT_COUNT_WIDTH 9
+#define PRINT_RUNTIME_WIDTH 10
+#define PRINT_LATENCY_WIDTH 10
+#define PRINT_TIMESTAMP_WIDTH 17
+#define PRINT_KWORK_NAME_WIDTH 30
+#define RPINT_DECIMAL_WIDTH 3
+#define PRINT_BRACKETPAIR_WIDTH 2
+#define PRINT_TIME_UNIT_SEC_WIDTH 2
+#define PRINT_TIME_UNIT_MESC_WIDTH 3
+#define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
+#define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
+#define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
+#define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)
+
+struct sort_dimension {
+        const char *name;
+        int (*cmp)(struct kwork_work *l, struct kwork_work *r);
+        struct list_head list;
+};
+
+static int id_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+        if (l->cpu > r->cpu)
+                return 1;
+        if (l->cpu < r->cpu)
+                return -1;
+
+        if (l->id > r->id)
+                return 1;
+        if (l->id < r->id)
+                return -1;
+
+        return 0;
+}
+
+static int count_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+        if (l->nr_atoms > r->nr_atoms)
+                return 1;
+        if (l->nr_atoms < r->nr_atoms)
+                return -1;
+
+        return 0;
+}
+
+static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+        if (l->total_runtime > r->total_runtime)
+                return 1;
+        if (l->total_runtime < r->total_runtime)
+                return -1;
+
+        return 0;
+}
+
+static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+        if (l->max_runtime > r->max_runtime)
+                return 1;
+        if (l->max_runtime < r->max_runtime)
+                return -1;
+
+        return 0;
+}
+
+static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+        u64 avgl, avgr;
+
+        if (!r->nr_atoms)
+                return 1;
+        if (!l->nr_atoms)
+                return -1;
+
+        avgl = l->total_latency / l->nr_atoms;
+        avgr = r->total_latency / r->nr_atoms;
+
+        if (avgl > avgr)
+                return 1;
+        if (avgl < avgr)
+                return -1;
+
+        return 0;
+}
+
+static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+        if (l->max_latency > r->max_latency)
+                return 1;
+        if (l->max_latency < r->max_latency)
+                return -1;
+
+        return 0;
+}
+
+static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
+                               const char *tok, struct list_head *list)
+{
+        size_t i;
+        static struct sort_dimension max_sort_dimension = {
+                .name = "max",
+                .cmp  = max_runtime_cmp,
+        };
+        static struct sort_dimension id_sort_dimension = {
+                .name = "id",
+                .cmp  = id_cmp,
+        };
+        static struct sort_dimension runtime_sort_dimension = {
+                .name = "runtime",
+                .cmp  = runtime_cmp,
+        };
+        static struct sort_dimension count_sort_dimension = {
+                .name = "count",
+                .cmp  = count_cmp,
+        };
+        static struct sort_dimension avg_sort_dimension = {
+                .name = "avg",
+                .cmp  = avg_latency_cmp,
+        };
+        struct sort_dimension *available_sorts[] = {
+                &id_sort_dimension,
+                &max_sort_dimension,
+                &count_sort_dimension,
+                &runtime_sort_dimension,
+                &avg_sort_dimension,
+        };
+
+        if (kwork->report == KWORK_REPORT_LATENCY)
+                max_sort_dimension.cmp = max_latency_cmp;
+
+        for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
+                if (!strcmp(available_sorts[i]->name, tok)) {
+                        list_add_tail(&available_sorts[i]->list, list);
+                        return 0;
+                }
+        }
+
+        return -1;
+}
+
+static void setup_sorting(struct perf_kwork *kwork,
+                          const struct option *options,
+                          const char * const usage_msg[])
+{
+        char *tmp, *tok, *str = strdup(kwork->sort_order);
+
+        for (tok = strtok_r(str, ", ", &tmp);
+             tok; tok = strtok_r(NULL, ", ", &tmp)) {
+                if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
+                        usage_with_options_msg(usage_msg, options,
+                                               "Unknown --sort key: `%s'", tok);
+        }
+
+        pr_debug("Sort order: %s\n", kwork->sort_order);
+        free(str);
+}
+
+static struct kwork_atom *atom_new(struct perf_kwork *kwork,
+                                   struct perf_sample *sample)
+{
+        unsigned long i;
+        struct kwork_atom_page *page;
+        struct kwork_atom *atom = NULL;
+
+        list_for_each_entry(page, &kwork->atom_page_list, list) {
+                if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
+                        i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
+                        BUG_ON(i >= NR_ATOM_PER_PAGE);
+                        atom = &page->atoms[i];
+                        goto found_atom;
+                }
+        }
+
+        /*
+         * new page
+         */
+        page = zalloc(sizeof(*page));
+        if (page == NULL) {
+                pr_err("Failed to zalloc kwork atom page\n");
+                return NULL;
+        }
+
+        i = 0;
+        atom = &page->atoms[0];
+        list_add_tail(&page->list, &kwork->atom_page_list);
+
+found_atom:
+        set_bit(i, page->bitmap);
+        atom->time = sample->time;
+        atom->prev = NULL;
+        atom->page_addr = page;
+        atom->bit_inpage = i;
+        return atom;
+}
+
+static void atom_free(struct kwork_atom *atom)
+{
+        if (atom->prev != NULL)
+                atom_free(atom->prev);
+
+        clear_bit(atom->bit_inpage,
+                  ((struct kwork_atom_page *)atom->page_addr)->bitmap);
+}
+
+static void atom_del(struct kwork_atom *atom)
+{
+        list_del(&atom->list);
+        atom_free(atom);
+}
+
+static int work_cmp(struct list_head *list,
+                    struct kwork_work *l, struct kwork_work *r)
+{
+        int ret = 0;
+        struct sort_dimension *sort;
+
+        BUG_ON(list_empty(list));
+
+        list_for_each_entry(sort, list, list) {
+                ret = sort->cmp(l, r);
+                if (ret)
+                        return ret;
+        }
+
+        return ret;
+}
+
+static struct kwork_work *work_search(struct rb_root_cached *root,
+                                      struct kwork_work *key,
+                                      struct list_head *sort_list)
+{
+        int cmp;
+        struct kwork_work *work;
+        struct rb_node *node = root->rb_root.rb_node;
+
+        while (node) {
+                work = container_of(node, struct kwork_work, node);
+                cmp = work_cmp(sort_list, key, work);
+                if (cmp > 0)
+                        node = node->rb_left;
+                else if (cmp < 0)
+                        node = node->rb_right;
+                else {
+                        if (work->name == NULL)
+                                work->name = key->name;
+                        return work;
+                }
+        }
+        return NULL;
+}
+
+static void work_insert(struct rb_root_cached *root,
+                        struct kwork_work *key, struct list_head *sort_list)
+{
+        int cmp;
+        bool leftmost = true;
+        struct kwork_work *cur;
+        struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
+
+        while (*new) {
+                cur = container_of(*new, struct kwork_work, node);
+                parent = *new;
+                cmp = work_cmp(sort_list, key, cur);
+
+                if (cmp > 0)
+                        new = &((*new)->rb_left);
+                else {
+                        new = &((*new)->rb_right);
+                        leftmost = false;
+                }
+        }
+
+        rb_link_node(&key->node, parent, new);
+        rb_insert_color_cached(&key->node, root, leftmost);
+}
+
+static struct kwork_work *work_new(struct kwork_work *key)
+{
+        int i;
+        struct kwork_work *work = zalloc(sizeof(*work));
+
+        if (work == NULL) {
+                pr_err("Failed to zalloc kwork work\n");
+                return NULL;
+        }
+
+        for (i = 0; i < KWORK_TRACE_MAX; i++)
+                INIT_LIST_HEAD(&work->atom_list[i]);
+
+        work->id = key->id;
+        work->cpu = key->cpu;
+        work->name = key->name;
+        work->class = key->class;
+        return work;
+}
+
+static struct kwork_work *work_findnew(struct rb_root_cached *root,
+                                       struct kwork_work *key,
+                                       struct list_head *sort_list)
+{
+        struct kwork_work *work = work_search(root, key, sort_list);
+
+        if (work != NULL)
+                return work;
+
+        work = work_new(key);
+        if (work)
+                work_insert(root, work, sort_list);
+
+        return work;
+}
+
+static void profile_update_timespan(struct perf_kwork *kwork,
+                                    struct perf_sample *sample)
+{
+        if (!kwork->summary)
+                return;
+
+        if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
+                kwork->timestart = sample->time;
+
+        if (kwork->timeend < sample->time)
+                kwork->timeend = sample->time;
+}
+
+static bool profile_event_match(struct perf_kwork *kwork,
+                                struct kwork_work *work,
+                                struct perf_sample *sample)
+{
+        int cpu = work->cpu;
+        u64 time = sample->time;
+        struct perf_time_interval *ptime = &kwork->ptime;
+
+        if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
+                return false;
+
+        if (((ptime->start != 0) && (ptime->start > time)) ||
+            ((ptime->end != 0) && (ptime->end < time)))
+                return false;
+
+        if ((kwork->profile_name != NULL) &&
+            (work->name != NULL) &&
+            (strcmp(work->name, kwork->profile_name) != 0))
+                return false;
+
+        profile_update_timespan(kwork, sample);
+        return true;
+}
+
+static int work_push_atom(struct perf_kwork *kwork,
+                          struct kwork_class *class,
+                          enum kwork_trace_type src_type,
+                          enum kwork_trace_type dst_type,
+                          struct evsel *evsel,
+                          struct perf_sample *sample,
+                          struct machine *machine,
+                          struct kwork_work **ret_work)
+{
+        struct kwork_atom *atom, *dst_atom;
+        struct kwork_work *work, key;
+
+        BUG_ON(class->work_init == NULL);
+        class->work_init(class, &key, evsel, sample, machine);
+
+        atom = atom_new(kwork, sample);
+        if (atom == NULL)
+                return -1;
+
+        work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
+        if (work == NULL) {
+                free(atom);
+                return -1;
+        }
+
+        if (!profile_event_match(kwork, work, sample))
+                return 0;
+
+        if (dst_type < KWORK_TRACE_MAX) {
+                dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
+                                                   struct kwork_atom, list);
+                if (dst_atom != NULL) {
+                        atom->prev = dst_atom;
+                        list_del(&dst_atom->list);
+                }
+        }
+
+        if (ret_work != NULL)
+                *ret_work = work;
+
+        list_add_tail(&atom->list, &work->atom_list[src_type]);
+
+        return 0;
+}
+
+static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
+                                        struct kwork_class *class,
+                                        enum kwork_trace_type src_type,
+                                        enum kwork_trace_type dst_type,
+                                        struct evsel *evsel,
+                                        struct perf_sample *sample,
+                                        struct machine *machine,
+                                        struct kwork_work **ret_work)
+{
+        struct kwork_atom *atom, *src_atom;
+        struct kwork_work *work, key;
+
+        BUG_ON(class->work_init == NULL);
+        class->work_init(class, &key, evsel, sample, machine);
+
+        work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
+        if (ret_work != NULL)
+                *ret_work = work;
+
+        if (work == NULL)
+                return NULL;
+
+        if (!profile_event_match(kwork, work, sample))
+                return NULL;
+
+        atom = list_last_entry_or_null(&work->atom_list[dst_type],
+                                       struct kwork_atom, list);
+        if (atom != NULL)
+                return atom;
+
+        src_atom = atom_new(kwork, sample);
+        if (src_atom != NULL)
+                list_add_tail(&src_atom->list, &work->atom_list[src_type]);
+        else {
+                if (ret_work != NULL)
+                        *ret_work = NULL;
+        }
+
+        return NULL;
+}
+
+static void report_update_exit_event(struct kwork_work *work,
+                                     struct kwork_atom *atom,
+                                     struct perf_sample *sample)
+{
+        u64 delta;
+        u64 exit_time = sample->time;
+        u64 entry_time = atom->time;
+
+        if ((entry_time != 0) && (exit_time >= entry_time)) {
+                delta = exit_time - entry_time;
+                if ((delta > work->max_runtime) ||
+                    (work->max_runtime == 0)) {
+                        work->max_runtime = delta;
+                        work->max_runtime_start = entry_time;
+                        work->max_runtime_end = exit_time;
+                }
+                work->total_runtime += delta;
+                work->nr_atoms++;
+        }
+}
+
+static int report_entry_event(struct perf_kwork *kwork,
+                              struct kwork_class *class,
+                              struct evsel *evsel,
+                              struct perf_sample *sample,
+                              struct machine *machine)
+{
+        return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
+                              KWORK_TRACE_MAX, evsel, sample,
+                              machine, NULL);
+}
+
+static int report_exit_event(struct perf_kwork *kwork,
+                             struct kwork_class *class,
+                             struct evsel *evsel,
+                             struct perf_sample *sample,
+                             struct machine *machine)
+{
+        struct kwork_atom *atom = NULL;
+        struct kwork_work *work = NULL;
+
+        atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
+                             KWORK_TRACE_ENTRY, evsel, sample,
+                             machine, &work);
+        if (work == NULL)
+                return -1;
+
+        if (atom != NULL) {
+                report_update_exit_event(work, atom, sample);
+                atom_del(atom);
+        }
+
+        return 0;
+}
+
+static void latency_update_entry_event(struct kwork_work *work,
+                                       struct kwork_atom *atom,
+                                       struct perf_sample *sample)
+{
+        u64 delta;
+        u64 entry_time = sample->time;
+        u64 raise_time = atom->time;
+
+        if ((raise_time != 0) && (entry_time >= raise_time)) {
+                delta = entry_time - raise_time;
+                if ((delta > work->max_latency) ||
+                    (work->max_latency == 0)) {
+                        work->max_latency = delta;
+                        work->max_latency_start = raise_time;
+                        work->max_latency_end = entry_time;
+                }
+                work->total_latency += delta;
+                work->nr_atoms++;
+        }
+}
+
+static int latency_raise_event(struct perf_kwork *kwork,
+                               struct kwork_class *class,
+                               struct evsel *evsel,
+                               struct perf_sample *sample,
+                               struct machine *machine)
+{
+        return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
+                              KWORK_TRACE_MAX, evsel, sample,
+                              machine, NULL);
+}
+
+static int latency_entry_event(struct perf_kwork *kwork,
+                               struct kwork_class *class,
+                               struct evsel *evsel,
+                               struct perf_sample *sample,
+                               struct machine *machine)
+{
+        struct kwork_atom *atom = NULL;
+        struct kwork_work *work = NULL;
+
+        atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
+                             KWORK_TRACE_RAISE, evsel, sample,
+                             machine, &work);
+        if (work == NULL)
+                return -1;
+
+        if (atom != NULL) {
+                latency_update_entry_event(work, atom, sample);
+                atom_del(atom);
+        }
+
+        return 0;
+}
+
+static void timehist_save_callchain(struct perf_kwork *kwork,
+                                    struct perf_sample *sample,
+                                    struct evsel *evsel,
+                                    struct machine *machine)
+{
+        struct symbol *sym;
+        struct thread *thread;
+        struct callchain_cursor_node *node;
+        struct callchain_cursor *cursor = &callchain_cursor;
+
+        if (!kwork->show_callchain || sample->callchain == NULL)
+                return;
+
+        /* want main thread for process - has maps */
+        thread = machine__findnew_thread(machine, sample->pid, sample->pid);
+        if (thread == NULL) {
+                pr_debug("Failed to get thread for pid %d\n", sample->pid);
+                return;
+        }
+
+        if (thread__resolve_callchain(thread, cursor, evsel, sample,
+                                      NULL, NULL, kwork->max_stack + 2) != 0) {
+                pr_debug("Failed to resolve callchain, skipping\n");
+                goto out_put;
+        }
+
+        callchain_cursor_commit(cursor);
+
+        while (true) {
+                node = callchain_cursor_current(cursor);
+                if (node == NULL)
+                        break;
+
+                sym = node->ms.sym;
+                if (sym) {
+                        if (!strcmp(sym->name, "__softirqentry_text_start") ||
+                            !strcmp(sym->name, "__do_softirq"))
+                                sym->ignore = 1;
+                }
+
+                callchain_cursor_advance(cursor);
+        }
+
+out_put:
+        thread__put(thread);
+}
+
+static void timehist_print_event(struct perf_kwork *kwork,
+                                 struct kwork_work *work,
+                                 struct kwork_atom *atom,
+                                 struct perf_sample *sample,
+                                 struct addr_location *al)
+{
+        char entrytime[32], exittime[32];
+        char kwork_name[PRINT_KWORK_NAME_WIDTH];
+
+        /*
+         * runtime start
+         */
+        timestamp__scnprintf_usec(atom->time,
+                                  entrytime, sizeof(entrytime));
+        printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);
+
+        /*
+         * runtime end
+         */
+        timestamp__scnprintf_usec(sample->time,
+                                  exittime, sizeof(exittime));
+        printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);
+
+        /*
+         * cpu
+         */
+        printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
+
+        /*
+         * kwork name
+         */
+        if (work->class && work->class->work_name) {
+                work->class->work_name(work, kwork_name,
+                                       PRINT_KWORK_NAME_WIDTH);
+                printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
+        } else
+                printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");
+
+        /*
+         * runtime
+         */
+        printf(" %*.*f ",
+               PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
+               (double)(sample->time - atom->time) / NSEC_PER_MSEC);
+
+        /*
+         * delaytime
+         */
+        if (atom->prev != NULL)
+                printf(" %*.*f ", PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
+                       (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC);
+        else
+                printf(" %*s ", PRINT_LATENCY_WIDTH, " ");
+
+        /*
+         * callchain
+         */
+        if (kwork->show_callchain) {
+                printf(" ");
+                sample__fprintf_sym(sample, al, 0,
+                                    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
+                                    EVSEL__PRINT_CALLCHAIN_ARROW |
+                                    EVSEL__PRINT_SKIP_IGNORED,
+                                    &callchain_cursor, symbol_conf.bt_stop_list,
+                                    stdout);
+        }
+
+        printf("\n");
+}
+
+static int timehist_raise_event(struct perf_kwork *kwork,
+                                struct kwork_class *class,
+                                struct evsel *evsel,
+                                struct perf_sample *sample,
+                                struct machine *machine)
+{
+        return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
+                              KWORK_TRACE_MAX, evsel, sample,
+                              machine, NULL);
+}
+
+static int timehist_entry_event(struct perf_kwork *kwork,
+                                struct kwork_class *class,
+                                struct evsel *evsel,
+                                struct perf_sample *sample,
+                                struct machine *machine)
+{
+        int ret;
+        struct kwork_work *work = NULL;
+
+        ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
+                             KWORK_TRACE_RAISE, evsel, sample,
+                             machine, &work);
+        if (ret)
+                return ret;
+
+        if (work != NULL)
+                timehist_save_callchain(kwork, sample, evsel, machine);
+
+        return 0;
+}
+
+static int timehist_exit_event(struct perf_kwork *kwork,
+                               struct kwork_class *class,
+                               struct evsel *evsel,
+                               struct perf_sample *sample,
+                               struct machine *machine)
+{
+        struct kwork_atom *atom = NULL;
+        struct kwork_work *work = NULL;
+        struct addr_location al;
+
+        if (machine__resolve(machine, &al, sample) < 0) {
+                pr_debug("Problem processing event, skipping it\n");
+                return -1;
+        }
+
+        atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
+                             KWORK_TRACE_ENTRY, evsel, sample,
+                             machine, &work);
+        if (work == NULL)
+                return -1;
+
+        if (atom != NULL) {
+                work->nr_atoms++;
+                timehist_print_event(kwork, work, atom, sample, &al);
+                atom_del(atom);
+        }
+
+        return 0;
+}
+
+static struct kwork_class kwork_irq;
+static int process_irq_handler_entry_event(struct perf_tool *tool,
+                                           struct evsel *evsel,
+                                           struct perf_sample *sample,
+                                           struct machine *machine)
+{
+        struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+        if (kwork->tp_handler->entry_event)
+                return kwork->tp_handler->entry_event(kwork, &kwork_irq,
+                                                      evsel, sample, machine);
+        return 0;
+}
+
+static int process_irq_handler_exit_event(struct perf_tool *tool,
+                                          struct evsel *evsel,
+                                          struct perf_sample *sample,
+                                          struct machine *machine)
+{
+        struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+        if (kwork->tp_handler->exit_event)
+                return kwork->tp_handler->exit_event(kwork, &kwork_irq,
+                                                     evsel, sample, machine);
+        return 0;
+}
+
+const struct evsel_str_handler irq_tp_handlers[] = {
+        { "irq:irq_handler_entry", process_irq_handler_entry_event, },
+        { "irq:irq_handler_exit",  process_irq_handler_exit_event,  },
+};
+
+static int irq_class_init(struct kwork_class *class,
+                          struct perf_session *session)
+{
+        if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
+                pr_err("Failed to set irq tracepoints handlers\n");
+                return -1;
+        }
+
+        class->work_root = RB_ROOT_CACHED;
+        return 0;
+}
+
+static void irq_work_init(struct kwork_class *class,
+                          struct kwork_work *work,
+                          struct evsel *evsel,
+                          struct perf_sample *sample,
+                          struct machine *machine __maybe_unused)
+{
+        work->class = class;
+        work->cpu = sample->cpu;
+        work->id = evsel__intval(evsel, sample, "irq");
+        work->name = evsel__strval(evsel, sample, "name");
+}
+
+static void irq_work_name(struct kwork_work *work, char *buf, int len)
+{
+        snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
+}
+
+static struct kwork_class kwork_irq = {
+        .name           = "irq",
+        .type           = KWORK_CLASS_IRQ,
+        .nr_tracepoints = 2,
+        .tp_handlers    = irq_tp_handlers,
+        .class_init     = irq_class_init,
+        .work_init      = irq_work_init,
+        .work_name      = irq_work_name,
+};
+
+static struct kwork_class kwork_softirq;
+static int process_softirq_raise_event(struct perf_tool *tool,
+                                       struct evsel *evsel,
+                                       struct perf_sample *sample,
+                                       struct machine *machine)
+{
+        struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+        if (kwork->tp_handler->raise_event)
+                return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
+                                                      evsel, sample, machine);
+
+        return 0;
+}
+
+static int process_softirq_entry_event(struct perf_tool *tool,
+                                       struct evsel *evsel,
+                                       struct perf_sample *sample,
+                                       struct machine *machine)
+{
+        struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+        if (kwork->tp_handler->entry_event)
+                return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
+                                                      evsel, sample, machine);
+
+        return 0;
+}
+
+static int process_softirq_exit_event(struct perf_tool *tool,
+                                      struct evsel *evsel,
+                                      struct perf_sample *sample,
+                                      struct machine *machine)
+{
+        struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+        if (kwork->tp_handler->exit_event)
+                return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
+                                                     evsel, sample, machine);
+
+        return 0;
+}
+
+const struct evsel_str_handler softirq_tp_handlers[] = {
+        { "irq:softirq_raise", process_softirq_raise_event, },
+        { "irq:softirq_entry", process_softirq_entry_event, },
+        { "irq:softirq_exit",  process_softirq_exit_event,  },
+};
+
+static int softirq_class_init(struct kwork_class *class,
+                              struct perf_session *session)
+{
+        if (perf_session__set_tracepoints_handlers(session,
+                                                   softirq_tp_handlers)) {
+                pr_err("Failed to set softirq tracepoints handlers\n");
+                return -1;
+        }
+
+        class->work_root = RB_ROOT_CACHED;
+        return 0;
+}
+
+static char *evsel__softirq_name(struct evsel *evsel, u64 num)
+{
+        char *name = NULL;
+        bool found = false;
+        struct tep_print_flag_sym *sym = NULL;
+        struct tep_print_arg *args = evsel->tp_format->print_fmt.args;
+
+        if ((args == NULL) || (args->next == NULL))
+                return NULL;
+
+        /* skip softirq field: "REC->vec" */
+        for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
+                if ((eval_flag(sym->value) == (unsigned long long)num) &&
+                    (strlen(sym->str) != 0)) {
+                        found = true;
+                        break;
+                }
+        }
+
+        if (!found)
+                return NULL;
+
+        name = strdup(sym->str);
+        if (name == NULL) {
+                pr_err("Failed to copy symbol name\n");
+                return NULL;
+        }
+        return name;
+}
+
+static void softirq_work_init(struct kwork_class *class,
+                              struct kwork_work *work,
+                              struct evsel *evsel,
+                              struct perf_sample *sample,
+                              struct machine *machine __maybe_unused)
+{
+        u64 num = evsel__intval(evsel, sample, "vec");
+
+        work->id = num;
+        work->class = class;
+        work->cpu = sample->cpu;
+        work->name = evsel__softirq_name(evsel, num);
+}
+
+static void softirq_work_name(struct kwork_work *work, char *buf, int len)
+{
+        snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
+}
+
+static struct kwork_class kwork_softirq = {
+        .name           = "softirq",
+        .type           = KWORK_CLASS_SOFTIRQ,
+        .nr_tracepoints = 3,
+        .tp_handlers    = softirq_tp_handlers,
+        .class_init     = softirq_class_init,
+        .work_init      = softirq_work_init,
+        .work_name      = softirq_work_name,
+};
+
+static struct kwork_class kwork_workqueue;
+static int process_workqueue_activate_work_event(struct perf_tool *tool,
+                                                 struct evsel *evsel,
+                                                 struct perf_sample *sample,
+                                                 struct machine *machine)
+{
+        struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+        if (kwork->tp_handler->raise_event)
+                return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
+                                                      evsel, sample, machine);
+
+        return 0;
+}
+
+static int process_workqueue_execute_start_event(struct perf_tool *tool,
+                                                 struct evsel *evsel,
+                                                 struct perf_sample *sample,
+                                                 struct machine *machine)
+{
+        struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+        if (kwork->tp_handler->entry_event)
+                return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
+                                                      evsel, sample, machine);
+
+        return 0;
+}
+
+static int process_workqueue_execute_end_event(struct perf_tool *tool,
+                                               struct evsel *evsel,
+                                               struct perf_sample *sample,
+                                               struct machine *machine)
+{
+        struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+        if (kwork->tp_handler->exit_event)
+                return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
+                                                     evsel, sample, machine);
+
+        return 0;
+}
+
+const struct evsel_str_handler workqueue_tp_handlers[] = {
+        { "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
+        { "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
+        { "workqueue:workqueue_execute_end",   process_workqueue_execute_end_event,   },
+};
+
+static int workqueue_class_init(struct kwork_class *class,
+                                struct perf_session *session)
+{
+        if (perf_session__set_tracepoints_handlers(session,
+                                                   workqueue_tp_handlers)) {
+                pr_err("Failed to set workqueue tracepoints handlers\n");
+                return -1;
+        }
+
+        class->work_root = RB_ROOT_CACHED;
+        return 0;
+}
+
+static void workqueue_work_init(struct kwork_class *class,
+                                struct kwork_work *work,
+                                struct evsel *evsel,
+                                struct perf_sample *sample,
+                                struct machine *machine)
+{
+        char *modp = NULL;
+        unsigned long long function_addr = evsel__intval(evsel,
+                                                         sample, "function");
+
+        work->class = class;
+        work->cpu = sample->cpu;
+        work->id = evsel__intval(evsel, sample, "work");
+        work->name = function_addr == 0 ? NULL :
+                machine__resolve_kernel_addr(machine, &function_addr, &modp);
+}
+
+static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
+{
+        if (work->name != NULL)
+                snprintf(buf, len, "(w)%s", work->name);
+        else
+                snprintf(buf, len, "(w)0x%" PRIx64, work->id);
+}
+
+static struct kwork_class kwork_workqueue = {
+        .name           = "workqueue",
+        .type           = KWORK_CLASS_WORKQUEUE,
+        .nr_tracepoints = 3,
+        .tp_handlers    = workqueue_tp_handlers,
+        .class_init     = workqueue_class_init,
+        .work_init      = workqueue_work_init,
+        .work_name      = workqueue_work_name,
+};
+
+static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
+        [KWORK_CLASS_IRQ]       = &kwork_irq,
+        [KWORK_CLASS_SOFTIRQ]   = &kwork_softirq,
+        [KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
+};
+
+static void print_separator(int len)
+{
+        printf(" %.*s\n", len, graph_dotted_line);
+}
+
+static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
+{
+        int ret = 0;
+        char kwork_name[PRINT_KWORK_NAME_WIDTH];
+        char max_runtime_start[32], max_runtime_end[32];
+        char max_latency_start[32], max_latency_end[32];
+
+        printf(" ");
+
+        /*
+         * kwork name
+         */
+        if (work->class && work->class->work_name) {
+                work->class->work_name(work, kwork_name,
+                                       PRINT_KWORK_NAME_WIDTH);
+                ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
+        } else {
+                ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
+        }
+
+        /*
+         * cpu
+         */
+        ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
+
+        /*
+         * total runtime
+         */
+        if (kwork->report == KWORK_REPORT_RUNTIME) {
+                ret += printf(" %*.*f ms |",
+                              PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
+                              (double)work->total_runtime / NSEC_PER_MSEC);
+        } else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
+                ret += printf(" %*.*f ms |",
+                              PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
+                              (double)work->total_latency /
+                              work->nr_atoms / NSEC_PER_MSEC);
+        }
+
+        /*
+         * count
+         */
+        ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
+
+        /*
+         * max runtime, max runtime start, max runtime end
+         */
+        if (kwork->report == KWORK_REPORT_RUNTIME) {
+                timestamp__scnprintf_usec(work->max_runtime_start,
+                                          max_runtime_start,
+                                          sizeof(max_runtime_start));
+                timestamp__scnprintf_usec(work->max_runtime_end,
+                                          max_runtime_end,
+                                          sizeof(max_runtime_end));
+                ret += printf(" %*.*f ms | %*s s | %*s s |",
+                              PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
+                              (double)work->max_runtime / NSEC_PER_MSEC,
+                              PRINT_TIMESTAMP_WIDTH, max_runtime_start,
+                              PRINT_TIMESTAMP_WIDTH, max_runtime_end);
+        }
+        /*
+         * max delay, max delay start, max delay end
+         */
+        else if (kwork->report == KWORK_REPORT_LATENCY) {
+                timestamp__scnprintf_usec(work->max_latency_start,
+                                          max_latency_start,
+                                          sizeof(max_latency_start));
+                timestamp__scnprintf_usec(work->max_latency_end,
+                                          max_latency_end,
+                                          sizeof(max_latency_end));
+                ret += printf(" %*.*f ms | %*s s | %*s s |",
+                              PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
+                              (double)work->max_latency / NSEC_PER_MSEC,
+                              PRINT_TIMESTAMP_WIDTH, max_latency_start,
+                              PRINT_TIMESTAMP_WIDTH, max_latency_end);
+        }
+
+        printf("\n");
+        return ret;
+}
+
+static int report_print_header(struct perf_kwork *kwork)
+{
+        int ret;
+
+        printf("\n ");
+        ret = printf(" %-*s | %-*s |",
+                     PRINT_KWORK_NAME_WIDTH, "Kwork Name",
+                     PRINT_CPU_WIDTH, "Cpu");
+
+        if (kwork->report == KWORK_REPORT_RUNTIME) {
+                ret += printf(" %-*s |",
+                              PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
+        } else if (kwork->report == KWORK_REPORT_LATENCY) {
+                ret += printf(" %-*s |",
+                              PRINT_LATENCY_HEADER_WIDTH, "Avg delay");
+        }
+
+        ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");
+
+        if (kwork->report == KWORK_REPORT_RUNTIME) {
+                ret += printf(" %-*s | %-*s | %-*s |",
+                              PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
+                              PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
+                              PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
+        } else if (kwork->report == KWORK_REPORT_LATENCY) {
+                ret += printf(" %-*s | %-*s | %-*s |",
+                              PRINT_LATENCY_HEADER_WIDTH, "Max delay",
+                              PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
+                              PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");
+        }
+
+        printf("\n");
+        print_separator(ret);
+        return ret;
+}
+
+static void timehist_print_header(void)
+{
+        /*
+         * header row
+         */
+        printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n",
+               PRINT_TIMESTAMP_WIDTH, "Runtime start",
+               PRINT_TIMESTAMP_WIDTH, "Runtime end",
+               PRINT_TIMEHIST_CPU_WIDTH, "Cpu",
+               PRINT_KWORK_NAME_WIDTH, "Kwork name",
+               PRINT_RUNTIME_WIDTH, "Runtime",
+               PRINT_RUNTIME_WIDTH, "Delaytime");
+
+        /*
+         * units row
+         */
+        printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n",
+               PRINT_TIMESTAMP_WIDTH, "",
+               PRINT_TIMESTAMP_WIDTH, "",
+               PRINT_TIMEHIST_CPU_WIDTH, "",
+               PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM",
+               PRINT_RUNTIME_WIDTH, "(msec)",
+               PRINT_RUNTIME_WIDTH, "(msec)");
+
+        /*
+         * separator
+         */
+        prin
