author     Ian Rogers <irogers@google.com>             2023-06-08 16:28:00 -0700
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2023-06-12 15:57:53 -0300
commit     ee84a3032b74055feed192a727e872b0a18d1140 (patch)
tree       c330d1abdf2adba7069a62ba8dd3b8d602def4c5 /tools/perf/util
parent     7ee227f674028435c01cb6fa02fa268ae48b1823 (diff)
download   linux-ee84a3032b74055feed192a727e872b0a18d1140.tar.gz
           linux-ee84a3032b74055feed192a727e872b0a18d1140.tar.bz2
           linux-ee84a3032b74055feed192a727e872b0a18d1140.zip
perf thread: Add accessor functions for thread
Using accessors will make it easier to add reference count checking in later patches.

Committer notes:

thread->nsinfo wasn't wrapped as it is used together with nsinfo__zput(), which does the trick of setting the field to NULL as its refcount is dropped, and that doesn't work well with thread__nsinfo(thread), which loses the &thread->nsinfo pointer. When refcount checking is added to 'struct thread', later in this series, nsinfo__zput(RC_CHK_ACCESS(thread)->nsinfo) will be used to check the thread pointer.

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ali Saidi <alisaidi@amazon.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Brian Robbins <brianrob@linux.microsoft.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: Fangrui Song <maskray@google.com>
Cc: German Gomez <german.gomez@arm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ivan Babrou <ivan@cloudflare.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Steinar H. Gunderson <sesse@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Wenyu Liu <liuwenyu7@huawei.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Jihong <yangjihong1@huawei.com>
Cc: Ye Xingchen <ye.xingchen@zte.com.cn>
Cc: Yuan Can <yuancan@huawei.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230608232823.4027869-4-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
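The conversion itself is mechanical: each direct read or write of a struct thread field at a call site becomes a call to a getter or setter, so the indirection (and, later in the series, reference-count checking) lives in one place. Below is a minimal standalone sketch of that shape; the struct layout and names are simplified stand-ins based on the call sites in this patch, not the real tools/perf/util/thread.h definitions.

/*
 * Sketch of the accessor pattern applied by this patch.
 * struct thread_sketch is an illustrative stand-in for struct thread;
 * the real accessors are defined in tools/perf/util/thread.h.
 */
#include <stdio.h>
#include <sys/types.h>

struct thread_sketch {
	pid_t pid_;	/* thread group id, as read by thread__pid() call sites */
	pid_t tid;	/* thread id */
	int   cpu;
};

static inline pid_t thread_sketch__pid(const struct thread_sketch *thread)
{
	return thread->pid_;	/* field access hidden behind the getter */
}

static inline void thread_sketch__set_pid(struct thread_sketch *thread, pid_t pid)
{
	thread->pid_ = pid;	/* setter counterpart used by machine__update_thread_pid()-style code */
}

int main(void)
{
	struct thread_sketch t = { .pid_ = -1, .tid = 42, .cpu = -1 };

	/* Call sites stop touching fields directly, so a later patch can add
	 * reference-count checking inside the accessors without editing every user. */
	thread_sketch__set_pid(&t, 42);
	printf("pid=%d tid=%d cpu=%d\n", thread_sketch__pid(&t), t.tid, t.cpu);
	return 0;
}

The one exception, noted above, is thread->nsinfo: nsinfo__zput() needs the address of the field so it can drop the reference and NULL the pointer in place, which a by-value thread__nsinfo(thread) getter cannot hand back.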
Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/arm-spe.c                                  4
-rw-r--r--  tools/perf/util/cs-etm.c                                   2
-rw-r--r--  tools/perf/util/data-convert-json.c                        8
-rw-r--r--  tools/perf/util/db-export.c                               16
-rw-r--r--  tools/perf/util/dlfilter.c                                 4
-rw-r--r--  tools/perf/util/event.c                                    6
-rw-r--r--  tools/perf/util/hist.c                                     6
-rw-r--r--  tools/perf/util/intel-bts.c                                2
-rw-r--r--  tools/perf/util/intel-pt.c                                12
-rw-r--r--  tools/perf/util/jitdump.c                                 10
-rw-r--r--  tools/perf/util/machine.c                                 91
-rw-r--r--  tools/perf/util/map.c                                      2
-rw-r--r--  tools/perf/util/maps.c                                     2
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c    14
-rw-r--r--  tools/perf/util/session.c                                  2
-rw-r--r--  tools/perf/util/sort.c                                    10
-rw-r--r--  tools/perf/util/thread-stack.c                            25
-rw-r--r--  tools/perf/util/thread.c                                 161
-rw-r--r--  tools/perf/util/thread.h                                 188
-rw-r--r--  tools/perf/util/unwind-libdw.c                             6
-rw-r--r--  tools/perf/util/unwind-libunwind-local.c                   6
-rw-r--r--  tools/perf/util/unwind-libunwind.c                         2
-rw-r--r--  tools/perf/util/vdso.c                                     2
23 files changed, 386 insertions, 195 deletions
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 7b36ba6b4079..afbd5869f6bf 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -254,9 +254,9 @@ static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
}
if (speq->thread) {
- speq->pid = speq->thread->pid_;
+ speq->pid = thread__pid(speq->thread);
if (queue->cpu == -1)
- speq->cpu = speq->thread->cpu;
+ speq->cpu = thread__cpu(speq->thread);
}
}
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 0f5be4ad24ba..b550c7393155 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -1311,7 +1311,7 @@ static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
tidq->tid);
if (tidq->thread)
- tidq->pid = tidq->thread->pid_;
+ tidq->pid = thread__pid(tidq->thread);
}
int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
diff --git a/tools/perf/util/data-convert-json.c b/tools/perf/util/data-convert-json.c
index 653709ab867a..291591e303cd 100644
--- a/tools/perf/util/data-convert-json.c
+++ b/tools/perf/util/data-convert-json.c
@@ -172,13 +172,13 @@ static int process_sample_event(struct perf_tool *tool,
output_json_format(out, false, 2, "{");
output_json_key_format(out, false, 3, "timestamp", "%" PRIi64, sample->time);
- output_json_key_format(out, true, 3, "pid", "%i", al.thread->pid_);
- output_json_key_format(out, true, 3, "tid", "%i", al.thread->tid);
+ output_json_key_format(out, true, 3, "pid", "%i", thread__pid(al.thread));
+ output_json_key_format(out, true, 3, "tid", "%i", thread__tid(al.thread));
if ((sample_type & PERF_SAMPLE_CPU))
output_json_key_format(out, true, 3, "cpu", "%i", sample->cpu);
- else if (al.thread->cpu >= 0)
- output_json_key_format(out, true, 3, "cpu", "%i", al.thread->cpu);
+ else if (thread__cpu(al.thread) >= 0)
+ output_json_key_format(out, true, 3, "cpu", "%i", thread__cpu(al.thread));
output_json_key_string(out, true, 3, "comm", thread__comm_str(al.thread));
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 84c970c11794..751fd53bfd93 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -64,13 +64,13 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
{
u64 main_thread_db_id = 0;
- if (thread->db_id)
+ if (thread__db_id(thread))
return 0;
- thread->db_id = ++dbe->thread_last_db_id;
+ thread__set_db_id(thread, ++dbe->thread_last_db_id);
if (main_thread)
- main_thread_db_id = main_thread->db_id;
+ main_thread_db_id = thread__db_id(main_thread);
if (dbe->export_thread)
return dbe->export_thread(dbe, thread, main_thread_db_id,
@@ -251,7 +251,7 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
*/
al.sym = node->ms.sym;
al.map = node->ms.map;
- al.maps = thread->maps;
+ al.maps = thread__maps(thread);
al.addr = node->ip;
if (al.map && !al.sym)
@@ -321,7 +321,7 @@ static int db_export__threads(struct db_export *dbe, struct thread *thread,
* For a non-main thread, db_export__comm_thread() must be
* called only if thread has not previously been exported.
*/
- bool export_comm_thread = comm && !thread->db_id;
+ bool export_comm_thread = comm && !thread__db_id(thread);
err = db_export__thread(dbe, thread, machine, main_thread);
if (err)
@@ -529,16 +529,16 @@ static int db_export__pid_tid(struct db_export *dbe, struct machine *machine,
struct thread *main_thread;
int err = 0;
- if (!thread || !thread->comm_set)
+ if (!thread || !thread__comm_set(thread))
goto out_put;
- *is_idle = !thread->pid_ && !thread->tid;
+ *is_idle = !thread__pid(thread) && !thread__tid(thread);
main_thread = thread__main_thread(machine, thread);
err = db_export__threads(dbe, thread, main_thread, machine, comm_ptr);
- *db_id = thread->db_id;
+ *db_id = thread__db_id(thread);
thread__put(main_thread);
out_put:
diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
index 16238f823a5e..8016f21dc0b8 100644
--- a/tools/perf/util/dlfilter.c
+++ b/tools/perf/util/dlfilter.c
@@ -197,8 +197,8 @@ static const __u8 *dlfilter__insn(void *ctx, __u32 *len)
if (!al->thread && machine__resolve(d->machine, al, d->sample) < 0)
return NULL;
- if (al->thread->maps) {
- struct machine *machine = maps__machine(al->thread->maps);
+ if (thread__maps(al->thread)) {
+ struct machine *machine = maps__machine(thread__maps(al->thread));
if (machine)
script_fetch_insn(d->sample, al->thread, machine);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index e8b0666d913c..e1ce7cb5e421 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -573,7 +573,7 @@ int perf_event__process(struct perf_tool *tool __maybe_unused,
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
- struct maps *maps = thread->maps;
+ struct maps *maps = thread__maps(thread);
struct machine *machine = maps__machine(maps);
bool load_map = false;
@@ -639,7 +639,7 @@ struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
struct map *map = thread__find_map(thread, cpumode, addr, al);
- struct machine *machine = maps__machine(thread->maps);
+ struct machine *machine = maps__machine(thread__maps(thread));
u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
if (map || addr_cpumode == cpumode)
@@ -696,7 +696,7 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
if (thread == NULL)
return -1;
- dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
+ dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread__tid(thread));
thread__find_map(thread, sample->cpumode, sample->ip, al);
dso = al->map ? map__dso(al->map) : NULL;
dump_printf(" ...... dso: %s\n",
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 3c9301a26dfc..4bc3affbe891 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -2778,12 +2778,12 @@ int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool sh
if (hists__has(hists, thread)) {
printed += scnprintf(bf + printed, size - printed,
", Thread: %s(%d)",
- (thread->comm_set ? thread__comm_str(thread) : ""),
- thread->tid);
+ (thread__comm_set(thread) ? thread__comm_str(thread) : ""),
+ thread__tid(thread));
} else {
printed += scnprintf(bf + printed, size - printed,
", Thread: %s",
- (thread->comm_set ? thread__comm_str(thread) : ""));
+ (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
}
}
if (dso)
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index 2c8147a62203..ec1b3bd9f530 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -456,7 +456,7 @@ static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
thread = machine__find_thread(btsq->bts->machine, -1,
btsq->tid);
if (thread)
- btsq->pid = thread->pid_;
+ btsq->pid = thread__pid(thread);
} else {
thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
btsq->tid);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index dde2ca77a005..45c7e7722916 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1428,13 +1428,13 @@ static int intel_pt_get_guest_from_sideband(struct intel_pt_queue *ptq)
ptq->guest_machine = machine;
}
- vcpu = ptq->thread ? ptq->thread->guest_cpu : -1;
+ vcpu = ptq->thread ? thread__guest_cpu(ptq->thread) : -1;
if (vcpu < 0)
return -1;
tid = machine__get_current_tid(machine, vcpu);
- if (ptq->guest_thread && ptq->guest_thread->tid != tid)
+ if (ptq->guest_thread && thread__tid(ptq->guest_thread) != tid)
thread__zput(ptq->guest_thread);
if (!ptq->guest_thread) {
@@ -1444,7 +1444,7 @@ static int intel_pt_get_guest_from_sideband(struct intel_pt_queue *ptq)
}
ptq->guest_machine_pid = machine_pid;
- ptq->guest_pid = ptq->guest_thread->pid_;
+ ptq->guest_pid = thread__pid(ptq->guest_thread);
ptq->guest_tid = tid;
ptq->vcpu = vcpu;
@@ -1467,9 +1467,9 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
if (ptq->thread) {
- ptq->pid = ptq->thread->pid_;
+ ptq->pid = thread__pid(ptq->thread);
if (queue->cpu == -1)
- ptq->cpu = ptq->thread->cpu;
+ ptq->cpu = thread__cpu(ptq->thread);
}
if (pt->have_guest_sideband && intel_pt_get_guest_from_sideband(ptq)) {
@@ -3074,7 +3074,7 @@ static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
if (ptq->pid == -1) {
ptq->thread = machine__find_thread(m, -1, ptq->tid);
if (ptq->thread)
- ptq->pid = ptq->thread->pid_;
+ ptq->pid = thread__pid(ptq->thread);
return;
}
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 28e49502db5e..2380b41a4caa 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -799,17 +799,19 @@ static void jit_add_pid(struct machine *machine, pid_t pid)
return;
}
- thread->priv = (void *)1;
+ thread__set_priv(thread, (void *)true);
}
static bool jit_has_pid(struct machine *machine, pid_t pid)
{
struct thread *thread = machine__find_thread(machine, pid, pid);
+ void *priv;
if (!thread)
- return 0;
+ return false;
- return (bool)thread->priv;
+ priv = thread__priv(thread);
+ return (bool)priv;
}
int
@@ -833,7 +835,7 @@ jit_process(struct perf_session *session,
return 0;
}
- nsi = nsinfo__get(thread->nsinfo);
+ nsi = nsinfo__get(thread__nsinfo(thread));
thread__put(thread);
/*
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index cbf092e32ee9..5d34d60a0045 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -77,13 +77,14 @@ static int thread_rb_node__cmp_tid(const void *key, const struct rb_node *nd)
{
int to_find = (int) *((pid_t *)key);
- return to_find - (int)rb_entry(nd, struct thread_rb_node, rb_node)->thread->tid;
+ return to_find - (int)thread__tid(rb_entry(nd, struct thread_rb_node, rb_node)->thread);
}
static struct thread_rb_node *thread_rb_node__find(const struct thread *th,
struct rb_root *tree)
{
- struct rb_node *nd = rb_find(&th->tid, tree, thread_rb_node__cmp_tid);
+ pid_t to_find = thread__tid(th);
+ struct rb_node *nd = rb_find(&to_find, tree, thread_rb_node__cmp_tid);
return rb_entry(nd, struct thread_rb_node, rb_node);
}
@@ -440,7 +441,7 @@ static struct thread *findnew_guest_code(struct machine *machine,
return NULL;
/* Assume maps are set up if there are any */
- if (maps__nr_maps(thread->maps))
+ if (maps__nr_maps(thread__maps(thread)))
return thread;
host_thread = machine__find_thread(host_machine, -1, pid);
@@ -453,7 +454,7 @@ static struct thread *findnew_guest_code(struct machine *machine,
* Guest code can be found in hypervisor process at the same address
* so copy host maps.
*/
- err = maps__clone(thread, host_thread->maps);
+ err = maps__clone(thread, thread__maps(host_thread));
thread__put(host_thread);
if (err)
goto out_err;
@@ -518,45 +519,45 @@ static void machine__update_thread_pid(struct machine *machine,
{
struct thread *leader;
- if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
+ if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1)
return;
- th->pid_ = pid;
+ thread__set_pid(th, pid);
- if (th->pid_ == th->tid)
+ if (thread__pid(th) == thread__tid(th))
return;
- leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
+ leader = __machine__findnew_thread(machine, thread__pid(th), thread__pid(th));
if (!leader)
goto out_err;
- if (!leader->maps)
- leader->maps = maps__new(machine);
+ if (!thread__maps(leader))
+ thread__set_maps(leader, maps__new(machine));
- if (!leader->maps)
+ if (!thread__maps(leader))
goto out_err;
- if (th->maps == leader->maps)
+ if (thread__maps(th) == thread__maps(leader))
return;
- if (th->maps) {
+ if (thread__maps(th)) {
/*
* Maps are created from MMAP events which provide the pid and
* tid. Consequently there never should be any maps on a thread
* with an unknown pid. Just print an error if there are.
*/
- if (!maps__empty(th->maps))
+ if (!maps__empty(thread__maps(th)))
pr_err("Discarding thread maps for %d:%d\n",
- th->pid_, th->tid);
- maps__put(th->maps);
+ thread__pid(th), thread__tid(th));
+ maps__put(thread__maps(th));
}
- th->maps = maps__get(leader->maps);
+ thread__set_maps(th, maps__get(thread__maps(leader)));
out_put:
thread__put(leader);
return;
out_err:
- pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
+ pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th));
goto out_put;
}
@@ -573,7 +574,7 @@ __threads__get_last_match(struct threads *threads, struct machine *machine,
th = threads->last_match;
if (th != NULL) {
- if (th->tid == tid) {
+ if (thread__tid(th) == tid) {
machine__update_thread_pid(machine, th, pid);
return thread__get(th);
}
@@ -632,13 +633,13 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
parent = *p;
th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
- if (th->tid == tid) {
+ if (thread__tid(th) == tid) {
threads__set_last_match(threads, th);
machine__update_thread_pid(machine, th, pid);
return thread__get(th);
}
- if (tid < th->tid)
+ if (tid < thread__tid(th))
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
@@ -2049,7 +2050,7 @@ out_problem:
static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd,
struct thread *th, bool lock)
{
- struct threads *threads = machine__threads(machine, th->tid);
+ struct threads *threads = machine__threads(machine, thread__tid(th));
if (!nd)
nd = thread_rb_node__find(th, &threads->entries.rb_root);
@@ -2060,7 +2061,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread_rb_n
if (lock)
down_write(&threads->lock);
- BUG_ON(refcount_read(&th->refcnt) == 0);
+ BUG_ON(refcount_read(thread__refcnt(th)) == 0);
thread__put(nd->thread);
rb_erase_cached(&nd->rb_node, &threads->entries);
@@ -2099,9 +2100,9 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
* (fork) event that would have removed the thread was lost. Assume the
* latter case and continue on as best we can.
*/
- if (parent->pid_ != (pid_t)event->fork.ppid) {
+ if (thread__pid(parent) != (pid_t)event->fork.ppid) {
dump_printf("removing erroneous parent thread %d/%d\n",
- parent->pid_, parent->tid);
+ thread__pid(parent), thread__tid(parent));
machine__remove_thread(machine, parent);
thread__put(parent);
parent = machine__findnew_thread(machine, event->fork.ppid,
@@ -2511,7 +2512,7 @@ static void save_lbr_cursor_node(struct thread *thread,
struct callchain_cursor *cursor,
int idx)
{
- struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
if (!lbr_stitch)
return;
@@ -2553,7 +2554,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
* in callchain_cursor_commit() when the writing session is closed.
* Using curr and pos to track the current cursor node.
*/
- if (thread->lbr_stitch) {
+ if (thread__lbr_stitch(thread)) {
cursor->curr = NULL;
cursor->pos = cursor->nr;
if (cursor->nr) {
@@ -2581,7 +2582,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
* But does not need to save current cursor node for entry 0.
* It's impossible to stitch the whole LBRs of previous sample.
*/
- if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
+ if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
if (!cursor->curr)
cursor->curr = cursor->first;
else
@@ -2634,7 +2635,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
struct callchain_cursor *cursor)
{
- struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
struct callchain_cursor_node *cnode;
struct stitch_list *stitch_node;
int err;
@@ -2658,7 +2659,7 @@ static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
static struct stitch_list *get_stitch_node(struct thread *thread)
{
- struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
struct stitch_list *stitch_node;
if (!list_empty(&lbr_stitch->free_lists)) {
@@ -2682,7 +2683,7 @@ static bool has_stitched_lbr(struct thread *thread,
struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
struct branch_stack *prev_stack = prev->branch_stack;
struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
- struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
int i, j, nr_identical_branches = 0;
struct stitch_list *stitch_node;
u64 cur_base, distance;
@@ -2746,27 +2747,29 @@ static bool has_stitched_lbr(struct thread *thread,
static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
{
- if (thread->lbr_stitch)
+ if (thread__lbr_stitch(thread))
return true;
- thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
- if (!thread->lbr_stitch)
+ thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
+ if (!thread__lbr_stitch(thread))
goto err;
- thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
- if (!thread->lbr_stitch->prev_lbr_cursor)
+ thread__lbr_stitch(thread)->prev_lbr_cursor =
+ calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
+ if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
goto free_lbr_stitch;
- INIT_LIST_HEAD(&thread->lbr_stitch->lists);
- INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);
+ INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
+ INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
return true;
free_lbr_stitch:
- zfree(&thread->lbr_stitch);
+ free(thread__lbr_stitch(thread));
+ thread__set_lbr_stitch(thread, NULL);
err:
pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
- thread->lbr_stitch_enable = false;
+ thread__set_lbr_stitch_enable(thread, false);
return false;
}
@@ -2802,9 +2805,9 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
if (i == chain_nr)
return 0;
- if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
+ if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
(max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
- lbr_stitch = thread->lbr_stitch;
+ lbr_stitch = thread__lbr_stitch(thread);
stitched_lbr = has_stitched_lbr(thread, sample,
&lbr_stitch->prev_sample,
@@ -2884,7 +2887,7 @@ static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
static u64 get_leaf_frame_caller(struct perf_sample *sample,
struct thread *thread, int usr_idx)
{
- if (machine__normalized_is(maps__machine(thread->maps), "arm64"))
+ if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
else
return 0;
@@ -3265,7 +3268,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
if (!thread)
return -ENOMEM;
- thread->cpu = cpu;
+ thread__set_cpu(thread, cpu);
thread__put(thread);
return 0;
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 4d9944bbf5e4..ae1d54d4880a 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -137,7 +137,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
no_dso = is_no_dso_memory(filename);
map->prot = prot;
map->flags = flags;
- nsi = nsinfo__get(thread->nsinfo);
+ nsi = nsinfo__get(thread__nsinfo(thread));
if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
snprintf(newfilename, sizeof(newfilename),
diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
index 1aeb1db58fe5..5ae6379a1b42 100644
--- a/tools/perf/util/maps.c
+++ b/tools/perf/util/maps.c
@@ -384,7 +384,7 @@ put_map:
*/
int maps__clone(struct thread *thread, struct maps *parent)
{
- struct maps *maps = thread->maps;
+ struct maps *maps = thread__maps(thread);
int err;
struct map_rb_node *rb_node;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 40964078f92f..f3d262e871ac 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1163,11 +1163,11 @@ static int python_export_thread(struct db_export *dbe, struct thread *thread,
t = tuple_new(5);
- tuple_set_d64(t, 0, thread->db_id);
+ tuple_set_d64(t, 0, thread__db_id(thread));
tuple_set_d64(t, 1, machine->db_id);
tuple_set_d64(t, 2, main_thread_db_id);
- tuple_set_s32(t, 3, thread->pid_);
- tuple_set_s32(t, 4, thread->tid);
+ tuple_set_s32(t, 3, thread__pid(thread));
+ tuple_set_s32(t, 4, thread__tid(thread));
call_object(tables->thread_handler, t, "thread_table");
@@ -1186,7 +1186,7 @@ static int python_export_comm(struct db_export *dbe, struct comm *comm,
tuple_set_d64(t, 0, comm->db_id);
tuple_set_string(t, 1, comm__str(comm));
- tuple_set_d64(t, 2, thread->db_id);
+ tuple_set_d64(t, 2, thread__db_id(thread));
tuple_set_d64(t, 3, comm->start);
tuple_set_s32(t, 4, comm->exec);
@@ -1207,7 +1207,7 @@ static int python_export_comm_thread(struct db_export *dbe, u64 db_id,
tuple_set_d64(t, 0, db_id);
tuple_set_d64(t, 1, comm->db_id);
- tuple_set_d64(t, 2, thread->db_id);
+ tuple_set_d64(t, 2, thread__db_id(thread));
call_object(tables->comm_thread_handler, t, "comm_thread_table");
@@ -1292,7 +1292,7 @@ static void python_export_sample_table(struct db_export *dbe,
tuple_set_d64(t, 0, es->db_id);
tuple_set_d64(t, 1, es->evsel->db_id);
tuple_set_d64(t, 2, maps__machine(es->al->maps)->db_id);
- tuple_set_d64(t, 3, es->al->thread->db_id);
+ tuple_set_d64(t, 3, thread__db_id(es->al->thread));
tuple_set_d64(t, 4, es->comm_db_id);
tuple_set_d64(t, 5, es->dso_db_id);
tuple_set_d64(t, 6, es->sym_db_id);
@@ -1382,7 +1382,7 @@ static int python_export_call_return(struct db_export *dbe,
t = tuple_new(14);
tuple_set_d64(t, 0, cr->db_id);
- tuple_set_d64(t, 1, cr->thread->db_id);
+ tuple_set_d64(t, 1, thread__db_id(cr->thread));
tuple_set_d64(t, 2, comm_db_id);
tuple_set_d64(t, 3, cr->cp->db_id);
tuple_set_d64(t, 4, cr->call_time);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index e2806791c76a..65ac9f7fdf7e 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -2807,7 +2807,7 @@ static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid,
if (!thread)
return -ENOMEM;
- thread->guest_cpu = guest_cpu;
+ thread__set_guest_cpu(thread, guest_cpu);
thread__put(thread);
return 0;
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 650cd8df4041..5e45c770f91d 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -108,7 +108,7 @@ static int64_t cmp_null(const void *l, const void *r)
static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return right->thread->tid - left->thread->tid;
+ return thread__tid(right->thread) - thread__tid(left->thread);
}
static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
@@ -117,7 +117,7 @@ static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
const char *comm = thread__comm_str(he->thread);
width = max(7U, width) - 8;
- return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
+ return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
width, width, comm ?: "");
}
@@ -1543,8 +1543,10 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
!l_dso->id.ino && !l_dso->id.ino_generation) {
/* userspace anonymous */
- if (left->thread->pid_ > right->thread->pid_) return -1;
- if (left->thread->pid_ < right->thread->pid_) return 1;
+ if (thread__pid(left->thread) > thread__pid(right->thread))
+ return -1;
+ if (thread__pid(left->thread) < thread__pid(right->thread))
+ return 1;
}
addr:
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
index 4b85c1728012..374d142e7390 100644
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -112,7 +112,7 @@ struct thread_stack {
*/
static inline bool thread_stack__per_cpu(struct thread *thread)
{
- return !(thread->tid || thread->pid_);
+ return !(thread__tid(thread) || thread__pid(thread));
}
static int thread_stack__grow(struct thread_stack *ts)
@@ -155,8 +155,8 @@ static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
ts->br_stack_sz = br_stack_sz;
}
- if (thread->maps && maps__machine(thread->maps)) {
- struct machine *machine = maps__machine(thread->maps);
+ if (thread__maps(thread) && maps__machine(thread__maps(thread))) {
+ struct machine *machine = maps__machine(thread__maps(thread));
const char *arch = perf_env__arch(machine->env);
ts->kernel_start = machine__kernel_start(machine);
@@ -175,7 +175,7 @@ static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
bool callstack,
unsigned int br_stack_sz)
{
- struct thread_stack *ts = thread->ts, *new_ts;
+ struct thread_stack *ts = thread__ts(thread), *new_ts;
unsigned int old_sz = ts ? ts->arr_sz : 0;
unsigned int new_sz = 1;
@@ -189,8 +189,8 @@ static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
if (ts)
memcpy(new_ts, ts, old_sz * sizeof(*ts));
new_ts->arr_sz = new_sz;
- z