perf machine: Protect the machine->threads with a rwlock
In addition to using refcounts for struct thread lifetime management, we
need to protect machine->threads from concurrent access.
That happens in 'perf top', where one thread processes events, inserting
and deleting entries from that rb_tree, while another thread decays
hist_entries, which ends up dropping references and ultimately deleting
threads from the rb_tree, releasing their resources when no further
hist_entry (or other data structure, as in 'perf sched') references
them.
So the rule is the same as for refcounts + protected trees in the kernel:
take the tree lock, find the object, bump its refcount, drop the tree
lock, return and use the object, then drop the refcount when no more use
of it is needed; if storing it in some other data structure, keep the
reference and drop it when releasing that data structure.
I.e. pair "t = machine__find(new)_thread()" with a "thread__put(t)", and
"perf_event__preprocess_sample(&al)" with "addr_location__put(&al)".
The addr_location__put() pairing is needed because we return references
to several data structures in the addr_location; as we add reference
counting to more of them, those extra references will also be dropped at
addr_location__put() time.
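
And a sketch of that second pairing (again only an illustration of the
convention; at this point addr_location__put() just drops the al.thread
reference, via thread__zput()):

  static int tool__process_sample(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event,
                                  struct perf_sample *sample,
                                  struct perf_evsel *evsel __maybe_unused,
                                  struct machine *machine)
  {
          struct addr_location al;
          int ret = 0;

          /* Grabs a reference to al.thread via machine__findnew_thread(): */
          if (perf_event__preprocess_sample(event, machine, &al, sample) < 0)
                  return -1;

          if (!al.filtered)
                  ret = consume_sample(&al); /* hypothetical consumer */

          addr_location__put(&al); /* drops the reference(s) held in 'al' */
          return ret;
  }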
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-bs9rt4n0jw3hi9f3zxyy3xln@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 71bf745..b57a027 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -84,6 +84,7 @@
{
struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
struct addr_location al;
+ int ret = 0;
if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
@@ -92,15 +93,16 @@
}
if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
- return 0;
+ goto out_put;
if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) {
pr_warning("problem incrementing symbol count, "
"skipping event\n");
- return -1;
+ ret = -1;
}
-
- return 0;
+out_put:
+ addr_location__put(&al);
+ return ret;
}
static int hist_entry__tty_annotate(struct hist_entry *he,
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index df6307b..daaa7dc 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -328,6 +328,7 @@
{
struct addr_location al;
struct hists *hists = evsel__hists(evsel);
+ int ret = -1;
if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
@@ -338,7 +339,7 @@
if (hists__add_entry(hists, &al, sample->period,
sample->weight, sample->transaction)) {
pr_warning("problem incrementing symbol period, skipping event\n");
- return -1;
+ goto out_put;
}
/*
@@ -350,8 +351,10 @@
hists->stats.total_period += sample->period;
if (!al.filtered)
hists->stats.total_non_filtered_period += sample->period;
-
- return 0;
+ ret = 0;
+out_put:
+ addr_location__put(&al);
+ return ret;
}
static struct perf_tool tool = {
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index d6a47e8..52ec66b 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -365,6 +365,7 @@
}
}
+ thread__put(thread);
repipe:
perf_event__repipe(tool, event, sample, machine);
return 0;
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index e628bf1..fe3fcb7 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -906,6 +906,7 @@
struct perf_evsel *evsel,
struct machine *machine)
{
+ int err = 0;
struct thread *thread = machine__findnew_thread(machine, sample->pid,
sample->tid);
@@ -919,10 +920,12 @@
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
- return f(evsel, sample);
+ err = f(evsel, sample);
}
- return 0;
+ thread__put(thread);
+
+ return err;
}
static struct perf_tool perf_kmem = {
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 1f9338f..15fecd3 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -651,6 +651,7 @@
struct perf_evsel *evsel,
struct machine *machine)
{
+ int err = 0;
struct thread *thread;
struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
tool);
@@ -666,9 +667,10 @@
}
if (!handle_kvm_event(kvm, thread, evsel, sample))
- return -1;
+ err = -1;
- return 0;
+ thread__put(thread);
+ return err;
}
static int cpu_isa_config(struct perf_kvm_stat *kvm)
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index d49c2ab..de16aae 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -769,6 +769,7 @@
t = perf_session__findnew(session, st->tid);
pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
node = rb_next(node);
+ thread__put(t);
};
}
@@ -810,6 +811,7 @@
struct perf_evsel *evsel,
struct machine *machine)
{
+ int err = 0;
struct thread *thread = machine__findnew_thread(machine, sample->pid,
sample->tid);
@@ -821,10 +823,12 @@
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
- return f(evsel, sample);
+ err = f(evsel, sample);
}
- return 0;
+ thread__put(thread);
+
+ return err;
}
static void sort_result(void)
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 675216e..da2ec06 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -74,7 +74,7 @@
}
if (al.filtered || (mem->hide_unresolved && al.sym == NULL))
- return 0;
+ goto out_put;
if (al.map != NULL)
al.map->dso->hit = 1;
@@ -103,7 +103,8 @@
symbol_conf.field_sep,
al.map ? (al.map->dso ? al.map->dso->long_name : "???") : "???",
al.sym ? al.sym->name : "???");
-
+out_put:
+ addr_location__put(&al);
return 0;
}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 18cb0ff..8d5118f 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -142,7 +142,7 @@
.hide_unresolved = rep->hide_unresolved,
.add_entry_cb = hist_iter__report_callback,
};
- int ret;
+ int ret = 0;
if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_debug("problem processing %d event, skipping it.\n",
@@ -151,10 +151,10 @@
}
if (rep->hide_unresolved && al.sym == NULL)
- return 0;
+ goto out_put;
if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
- return 0;
+ goto out_put;
if (sort__mode == SORT_MODE__BRANCH)
iter.ops = &hist_iter_branch;
@@ -172,7 +172,8 @@
rep);
if (ret < 0)
pr_debug("problem adding hist entry, skipping event\n");
-
+out_put:
+ addr_location__put(&al);
return ret;
}
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 5275bab..79273ec 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -770,7 +770,7 @@
if (child == NULL || parent == NULL) {
pr_debug("thread does not exist on fork event: child %p, parent %p\n",
child, parent);
- return 0;
+ goto out_put;
}
if (verbose) {
@@ -781,6 +781,9 @@
register_pid(sched, parent->tid, thread__comm_str(parent));
register_pid(sched, child->tid, thread__comm_str(child));
+out_put:
+ thread__put(child);
+ thread__put(parent);
return 0;
}
@@ -957,7 +960,7 @@
struct work_atoms *out_events, *in_events;
struct thread *sched_out, *sched_in;
u64 timestamp0, timestamp = sample->time;
- int cpu = sample->cpu;
+ int cpu = sample->cpu, err = -1;
s64 delta;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -976,15 +979,17 @@
sched_out = machine__findnew_thread(machine, -1, prev_pid);
sched_in = machine__findnew_thread(machine, -1, next_pid);
+ if (sched_out == NULL || sched_in == NULL)
+ goto out_put;
out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
if (!out_events) {
if (thread_atoms_insert(sched, sched_out))
- return -1;
+ goto out_put;
out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
if (!out_events) {
pr_err("out-event: Internal tree error");
- return -1;
+ goto out_put;
}
}
if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
@@ -993,22 +998,25 @@
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
if (!in_events) {
if (thread_atoms_insert(sched, sched_in))
- return -1;
+ goto out_put;
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
if (!in_events) {
pr_err("in-event: Internal tree error");
- return -1;
+ goto out_put;
}
/*
* Take came in we have not heard about yet,
* add in an initial atom in runnable state:
*/
if (add_sched_out_event(in_events, 'R', timestamp))
- return -1;
+ goto out_put;
}
add_sched_in_event(in_events, timestamp);
-
- return 0;
+ err = 0;
+out_put:
+ thread__put(sched_out);
+ thread__put(sched_in);
+ return err;
}
static int latency_runtime_event(struct perf_sched *sched,
@@ -1021,23 +1029,29 @@
struct thread *thread = machine__findnew_thread(machine, -1, pid);
struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
u64 timestamp = sample->time;
- int cpu = sample->cpu;
+ int cpu = sample->cpu, err = -1;
+
+ if (thread == NULL)
+ return -1;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
if (!atoms) {
if (thread_atoms_insert(sched, thread))
- return -1;
+ goto out_put;
atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
if (!atoms) {
pr_err("in-event: Internal tree error");
- return -1;
+ goto out_put;
}
if (add_sched_out_event(atoms, 'R', timestamp))
- return -1;
+ goto out_put;
}
add_runtime_event(atoms, runtime, timestamp);
- return 0;
+ err = 0;
+out_put:
+ thread__put(thread);
+ return err;
}
static int latency_wakeup_event(struct perf_sched *sched,
@@ -1050,19 +1064,22 @@
struct work_atom *atom;
struct thread *wakee;
u64 timestamp = sample->time;
+ int err = -1;
wakee = machine__findnew_thread(machine, -1, pid);
+ if (wakee == NULL)
+ return -1;
atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
if (!atoms) {
if (thread_atoms_insert(sched, wakee))
- return -1;
+ goto out_put;
atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
if (!atoms) {
pr_err("wakeup-event: Internal tree error");
- return -1;
+ goto out_put;
}
if (add_sched_out_event(atoms, 'S', timestamp))
- return -1;
+ goto out_put;
}
BUG_ON(list_empty(&atoms->work_list));
@@ -1081,17 +1098,21 @@
* skip in this case.
*/
if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
- return 0;
+ goto out_ok;
sched->nr_timestamps++;
if (atom->sched_out_time > timestamp) {
sched->nr_unordered_timestamps++;
- return 0;
+ goto out_ok;
}
atom->state = THREAD_WAIT_CPU;
atom->wake_up_time = timestamp;
- return 0;
+out_ok:
+ err = 0;
+out_put:
+ thread__put(wakee);
+ return err;
}
static int latency_migrate_task_event(struct perf_sched *sched,
@@ -1104,6 +1125,7 @@
struct work_atoms *atoms;
struct work_atom *atom;
struct thread *migrant;
+ int err = -1;
/*
* Only need to worry about migration when profiling one CPU.
@@ -1112,18 +1134,20 @@
return 0;
migrant = machine__findnew_thread(machine, -1, pid);
+ if (migrant == NULL)
+ return -1;
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (!atoms) {
if (thread_atoms_insert(sched, migrant))
- return -1;
+ goto out_put;
register_pid(sched, migrant->tid, thread__comm_str(migrant));
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (!atoms) {
pr_err("migration-event: Internal tree error");
- return -1;
+ goto out_put;
}
if (add_sched_out_event(atoms, 'R', timestamp))
- return -1;
+ goto out_put;
}
BUG_ON(list_empty(&atoms->work_list));
@@ -1135,8 +1159,10 @@
if (atom->sched_out_time > timestamp)
sched->nr_unordered_timestamps++;
-
- return 0;
+ err = 0;
+out_put:
+ thread__put(migrant);
+ return err;
}
static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
@@ -1330,8 +1356,10 @@
}
sched_in = machine__findnew_thread(machine, -1, next_pid);
+ if (sched_in == NULL)
+ return -1;
- sched->curr_thread[this_cpu] = sched_in;
+ sched->curr_thread[this_cpu] = thread__get(sched_in);
printf(" ");
@@ -1381,6 +1409,8 @@
printf("\n");
}
+ thread__put(sched_in);
+
return 0;
}
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 6805098..24809787 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -607,13 +607,14 @@
}
if (al.filtered)
- return 0;
+ goto out_put;
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
- return 0;
+ goto out_put;
scripting_ops->process_event(event, sample, evsel, &al);
-
+out_put:
+ addr_location__put(&al);
return 0;
}
@@ -681,8 +682,8 @@
print_sample_start(sample, thread, evsel);
perf_event__fprintf(event, stdout);
ret = 0;
-
out:
+ thread__put(thread);
return ret;
}
@@ -713,6 +714,7 @@
}
print_sample_start(sample, thread, evsel);
perf_event__fprintf(event, stdout);
+ thread__put(thread);
return 0;
}
@@ -721,6 +723,7 @@
struct perf_sample *sample,
struct machine *machine)
{
+ int err = 0;
struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
@@ -742,9 +745,10 @@
perf_event__fprintf(event, stdout);
if (perf_event__process_exit(tool, event, sample, machine) < 0)
- return -1;
+ err = -1;
- return 0;
+ thread__put(thread);
+ return err;
}
static int process_mmap_event(struct perf_tool *tool,
@@ -774,7 +778,7 @@
}
print_sample_start(sample, thread, evsel);
perf_event__fprintf(event, stdout);
-
+ thread__put(thread);
return 0;
}
@@ -805,7 +809,7 @@
}
print_sample_start(sample, thread, evsel);
perf_event__fprintf(event, stdout);
-
+ thread__put(thread);
return 0;
}
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index e50fe11..3b884e3 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -523,7 +523,7 @@
* Discard all.
*/
zfree(&p);
- goto exit;
+ goto exit_put;
}
continue;
}
@@ -538,7 +538,8 @@
else
fprintf(f, "..... %016" PRIx64 "\n", ip);
}
-
+exit_put:
+ addr_location__put(&al);
exit:
fclose(f);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1cb3436..2326583 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -793,7 +793,7 @@
pthread_mutex_unlock(&hists->lock);
}
- return;
+ addr_location__put(&al);
}
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index d1139b6..bb05e44 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1712,7 +1712,7 @@
void *args;
size_t printed = 0;
struct thread *thread;
- int id = perf_evsel__sc_tp_uint(evsel, id, sample);
+ int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
struct thread_trace *ttrace;
@@ -1725,14 +1725,14 @@
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
- return -1;
+ goto out_put;
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
if (ttrace->entry_str == NULL) {
ttrace->entry_str = malloc(1024);
if (!ttrace->entry_str)
- return -1;
+ goto out_put;
}
if (!trace->summary_only)
@@ -1757,8 +1757,10 @@
thread__put(trace->current);
trace->current = thread__get(thread);
}
-
- return 0;
+ err = 0;
+out_put:
+ thread__put(thread);
+ return err;
}
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
@@ -1768,7 +1770,7 @@
long ret;
u64 duration = 0;
struct thread *thread;
- int id = perf_evsel__sc_tp_uint(evsel, id, sample);
+ int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
struct thread_trace *ttrace;
@@ -1781,7 +1783,7 @@
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
- return -1;
+ goto out_put;
if (trace->summary)
thread__update_stats(ttrace, id, sample);
@@ -1835,8 +1837,10 @@
fputc('\n', trace->output);
out:
ttrace->entry_pending = false;
-
- return 0;
+ err = 0;
+out_put:
+ thread__put(thread);
+ return err;
}
static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
@@ -1863,6 +1867,7 @@
ttrace->runtime_ms += runtime_ms;
trace->runtime_ms += runtime_ms;
+ thread__put(thread);
return 0;
out_dump:
@@ -1872,6 +1877,7 @@
(pid_t)perf_evsel__intval(evsel, sample, "pid"),
runtime,
perf_evsel__intval(evsel, sample, "vruntime"));
+ thread__put(thread);
return 0;
}
@@ -1924,11 +1930,12 @@
struct addr_location al;
char map_type = 'd';
struct thread_trace *ttrace;
+ int err = -1;
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
- return -1;
+ goto out_put;
if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
ttrace->pfmaj++;
@@ -1936,7 +1943,7 @@
ttrace->pfmin++;
if (trace->summary_only)
- return 0;
+ goto out;
thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
sample->ip, &al);
@@ -1967,8 +1974,11 @@
print_location(trace->output, sample, &al, true, false);
fprintf(trace->output, " (%c%c)\n", map_type, al.level);
-
- return 0;
+out:
+ err = 0;
+out_put:
+ thread__put(thread);
+ return err;
}
static bool skip_sample(struct trace *trace, struct perf_sample *sample)
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index ca0e480..e2a432b 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -248,6 +248,7 @@
struct perf_sample sample;
struct thread *thread;
u8 cpumode;
+ int ret;
if (perf_evlist__parse_sample(evlist, event, &sample)) {
pr_debug("perf_evlist__parse_sample failed\n");
@@ -262,7 +263,9 @@
cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- return read_object_code(sample.ip, READLEN, cpumode, thread, state);
+ ret = read_object_code(sample.ip, READLEN, cpumode, thread, state);
+ thread__put(thread);
+ return ret;
}
static int process_event(struct machine *machine, struct perf_evlist *evlist,
@@ -457,13 +460,13 @@
thread = machine__findnew_thread(machine, pid, pid);
if (!thread) {
pr_debug("machine__findnew_thread failed\n");
- goto out_err;
+ goto out_put;
}
cpus = cpu_map__new(NULL);
if (!cpus) {
pr_debug("cpu_map__new failed\n");
- goto out_err;
+ goto out_put;
}
while (1) {
@@ -472,7 +475,7 @@
evlist = perf_evlist__new();
if (!evlist) {
pr_debug("perf_evlist__new failed\n");
- goto out_err;
+ goto out_put;
}
perf_evlist__set_maps(evlist, cpus, threads);
@@ -485,7 +488,7 @@
ret = parse_events(evlist, str, NULL);
if (ret < 0) {
pr_debug("parse_events failed\n");
- goto out_err;
+ goto out_put;
}
perf_evlist__config(evlist, &opts);
@@ -506,7 +509,7 @@
continue;
}
pr_debug("perf_evlist__open failed\n");
- goto out_err;
+ goto out_put;
}
break;
}
@@ -514,7 +517,7 @@
ret = perf_evlist__mmap(evlist, UINT_MAX, false);
if (ret < 0) {
pr_debug("perf_evlist__mmap failed\n");
- goto out_err;
+ goto out_put;
}
perf_evlist__enable(evlist);
@@ -525,7 +528,7 @@
ret = process_events(machine, evlist, &state);
if (ret < 0)
- goto out_err;
+ goto out_put;
if (!have_vmlinux && !have_kcore && !try_kcore)
err = TEST_CODE_READING_NO_KERNEL_OBJ;
@@ -535,7 +538,10 @@
err = TEST_CODE_READING_NO_ACCESS;
else
err = TEST_CODE_READING_OK;
+out_put:
+ thread__put(thread);
out_err:
+
if (evlist) {
perf_evlist__delete(evlist);
} else {
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 0bf06be..9b748e1 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -170,6 +170,7 @@
}
err = krava_1(thread);
+ thread__put(thread);
out:
machine__delete_threads(machine);
diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c
index a62c091..456f884 100644
--- a/tools/perf/tests/hists_common.c
+++ b/tools/perf/tests/hists_common.c
@@ -96,6 +96,7 @@
goto out;
thread__set_comm(thread, fake_threads[i].comm, 0);
+ thread__put(thread);
}
for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index b08a95a..620f626 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -105,8 +105,10 @@
goto out;
if (hist_entry_iter__add(&iter, &al, evsel, &sample,
- PERF_MAX_STACK_DEPTH, NULL) < 0)
+ PERF_MAX_STACK_DEPTH, NULL) < 0) {
+ addr_location__put(&al);
goto out;
+ }
fake_samples[i].thread = al.thread;
fake_samples[i].map = al.map;
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 108488c..82e1ee5 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -82,8 +82,10 @@
goto out;
if (hist_entry_iter__add(&iter, &al, evsel, &sample,
- PERF_MAX_STACK_DEPTH, NULL) < 0)
+ PERF_MAX_STACK_DEPTH, NULL) < 0) {
+ addr_location__put(&al);
goto out;
+ }
fake_samples[i].thread = al.thread;
fake_samples[i].map = al.map;
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 34c61e4..8c102b0 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -91,8 +91,10 @@
he = __hists__add_entry(hists, &al, NULL,
NULL, NULL, 1, 1, 0, true);
- if (he == NULL)
+ if (he == NULL) {
+ addr_location__put(&al);
goto out;
+ }
fake_common_samples[k].thread = al.thread;
fake_common_samples[k].map = al.map;
@@ -115,8 +117,10 @@
he = __hists__add_entry(hists, &al, NULL,
NULL, NULL, 1, 1, 0, true);
- if (he == NULL)
+ if (he == NULL) {
+ addr_location__put(&al);
goto out;
+ }
fake_samples[i][k].thread = al.thread;
fake_samples[i][k].map = al.map;
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index d8a23db..fd7ec4f 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -71,8 +71,10 @@
goto out;
if (hist_entry_iter__add(&iter, &al, evsel, &sample,
- PERF_MAX_STACK_DEPTH, NULL) < 0)
+ PERF_MAX_STACK_DEPTH, NULL) < 0) {
+ addr_location__put(&al);
goto out;
+ }
fake_samples[i].thread = al.thread;
fake_samples[i].map = al.map;
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 2113f1c..264e215 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -191,6 +191,8 @@
PERF_RECORD_MISC_USER, MAP__FUNCTION,
(unsigned long) (td->map + 1), &al);
+ thread__put(thread);
+
if (!al.map) {
pr_debug("failed, couldn't find map\n");
err = -1;
diff --git a/tools/perf/tests/thread-mg-share.c b/tools/perf/tests/thread-mg-share.c
index b028499..dc05bd6 100644
--- a/tools/perf/tests/thread-mg-share.c
+++ b/tools/perf/tests/thread-mg-share.c
@@ -64,22 +64,22 @@
TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->mg);
/* release thread group */
- thread__delete(leader);
+ thread__put(leader);
TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 3);
- thread__delete(t1);
+ thread__put(t1);
TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 2);
- thread__delete(t2);
+ thread__put(t2);
TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 1);
- thread__delete(t3);
+ thread__put(t3);
/* release other group */
- thread__delete(other_leader);
+ thread__put(other_leader);
TEST_ASSERT_VAL("wrong refcnt", other_mg->refcnt == 1);
- thread__delete(other);
+ thread__put(other);
/*
* Cannot call machine__delete_threads(machine) now,
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 61867df..ad8cfcb 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -43,6 +43,7 @@
if (al.map != NULL)
al.map->dso->hit = 1;
+ thread__put(thread);
return 0;
}
@@ -59,8 +60,10 @@
dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
event->fork.ppid, event->fork.ptid);
- if (thread)
+ if (thread) {
machine__remove_thread(machine, thread);
+ thread__put(thread);
+ }
return 0;
}
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index bb39a3f..eb7a2ac 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -122,6 +122,7 @@
int db_export__thread(struct db_export *dbe, struct thread *thread,
struct machine *machine, struct comm *comm)
{
+ struct thread *main_thread;
u64 main_thread_db_id = 0;
int err;
@@ -131,8 +132,6 @@
thread->db_id = ++dbe->thread_last_db_id;
if (thread->pid_ != -1) {
- struct thread *main_thread;
-
if (thread->pid_ == thread->tid) {
main_thread = thread;
} else {
@@ -144,14 +143,16 @@
err = db_export__thread(dbe, main_thread, machine,
comm);
if (err)
- return err;
+ goto out_put;
if (comm) {
err = db_export__comm_thread(dbe, comm, thread);
if (err)
- return err;
+ goto out_put;
}
}
main_thread_db_id = main_thread->db_id;
+ if (main_thread != thread)
+ thread__put(main_thread);
}
if (dbe->export_thread)
@@ -159,6 +160,10 @@
machine);
return 0;
+
+out_put:
+ thread__put(main_thread);
+ return err;
}
int db_export__comm(struct db_export *dbe, struct comm *comm,
@@ -303,6 +308,7 @@
if (err)
return err;
+ /* FIXME: check refcounting for get_main_thread, that calls machine__find_thread... */
main_thread = get_main_thread(al->machine, thread);
if (main_thread)
comm = machine__thread_exec_comm(al->machine, main_thread);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index db52609..a513a51 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -919,6 +919,10 @@
al->sym = NULL;
}
+/*
+ * Callers need to drop the reference to al->thread, obtained in
+ * machine__findnew_thread()
+ */
int perf_event__preprocess_sample(const union perf_event *event,
struct machine *machine,
struct addr_location *al,
@@ -979,6 +983,17 @@
return 0;
}
+/*
+ * The preprocess_sample method will return with reference counts for the
+ * entries in it; when done using it (and perhaps getting ref counts if
+ * needing to keep a pointer to one of those entries) it must be paired
+ * with addr_location__put(), so that the refcounts can be decremented.
+ */
+void addr_location__put(struct addr_location *al)
+{
+ thread__zput(al->thread);
+}
+
bool is_bts_event(struct perf_event_attr *attr)
{
return attr->type == PERF_TYPE_HARDWARE &&
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 7eecd5e..97179ab 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -426,6 +426,8 @@
struct addr_location *al,
struct perf_sample *sample);
+void addr_location__put(struct addr_location *al);
+
struct thread;
bool is_bts_event(struct perf_event_attr *attr);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 2f47110..8b0b307 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -14,6 +14,8 @@
#include "unwind.h"
#include "linux/hash.h"
+static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
+
static void dsos__init(struct dsos *dsos)
{
INIT_LIST_HEAD(&dsos->head);
@@ -28,6 +30,7 @@
dsos__init(&machine->kernel_dsos);
machine->threads = RB_ROOT;
+ pthread_rwlock_init(&machine->threads_lock, NULL);
INIT_LIST_HEAD(&machine->dead_threads);
machine->last_match = NULL;
@@ -54,6 +57,7 @@
snprintf(comm, sizeof(comm), "[guest/%d]", pid);
thread__set_comm(thread, comm, 0);
+ thread__put(thread);
}
machine->current_tid = NULL;
@@ -91,14 +95,17 @@
void machine__delete_threads(struct machine *machine)
{
- struct rb_node *nd = rb_first(&machine->threads);
+ struct rb_node *nd;
+ pthread_rwlock_wrlock(&machine->threads_lock);
+ nd = rb_first(&machine->threads);
while (nd) {
struct thread *t = rb_entry(nd, struct thread, rb_node);
nd = rb_next(nd);
- machine__remove_thread(machine, t);
+ __machine__remove_thread(machine, t, false);
}
+ pthread_rwlock_unlock(&machine->threads_lock);
}
void machine__exit(struct machine *machine)
@@ -109,6 +116,7 @@
vdso__exit(machine);
zfree(&machine->root_dir);
zfree(&machine->current_tid);
+ pthread_rwlock_destroy(&machine->threads_lock);
}
void machine__delete(struct machine *machine)
@@ -303,7 +311,7 @@
if (th->pid_ == th->tid)
return;
- leader = machine__findnew_thread(machine, th->pid_, th->pid_);
+ leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
if (!leader)
goto out_err;
@@ -336,9 +344,9 @@
pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}
-static struct thread *__machine__findnew_thread(struct machine *machine,
- pid_t pid, pid_t tid,
- bool create)
+static struct thread *____machine__findnew_thread(struct machine *machine,
+ pid_t pid, pid_t tid,
+ bool create)
{
struct rb_node **p = &machine->threads.rb_node;
struct rb_node *parent = NULL;
@@ -393,6 +401,7 @@
*/
if (thread__init_map_groups(th, machine)) {
rb_erase(&th->rb_node, &machine->threads);
+ RB_CLEAR_NODE(&th->rb_node);
thread__delete(th);
return NULL;
}
@@ -406,16 +415,30 @@
return th;
}
+struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
+{
+ return ____machine__findnew_thread(machine, pid, tid, true);
+}
+
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
pid_t tid)
{
- return __machine__findnew_thread(machine, pid, tid, true);
+ struct thread *th;
+
+ pthread_rwlock_wrlock(&machine->threads_lock);
+ th = thread__get(__machine__findnew_thread(machine, pid, tid));
+ pthread_rwlock_unlock(&machine->threads_lock);
+ return th;
}
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
pid_t tid)
{
- return __machine__findnew_thread(machine, pid, tid, false);
+ struct thread *th;
+ pthread_rwlock_rdlock(&machine->threads_lock);
+ th = thread__get(____machine__findnew_thread(machine, pid, tid, false));
+ pthread_rwlock_unlock(&machine->threads_lock);
+ return th;
}
struct comm *machine__thread_exec_comm(struct machine *machine,
@@ -434,6 +457,7 @@
event->comm.pid,
event->comm.tid);
bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
+ int err = 0;
if (exec)
machine->comm_exec = true;
@@ -444,10 +468,12 @@
if (thread == NULL ||
__thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
- return -1;
+ err = -1;
}
- return 0;
+ thread__put(thread);
+
+ return err;
}
int machine__process_lost_event(struct machine *machine __maybe_unused,
@@ -591,12 +617,16 @@
size_t ret = 0;
struct rb_node *nd;
+ pthread_rwlock_rdlock(&machine->threads_lock);
+
for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
}
+ pthread_rwlock_unlock(&machine->threads_lock);
+
return ret;
}
@@ -1213,11 +1243,14 @@
event->mmap2.filename, type, thread);
if (map == NULL)
- goto out_problem;
+ goto out_problem_map;
thread__insert_map(thread, map);
+ thread__put(thread);
return 0;
+out_problem_map:
+ thread__put(thread);
out_problem:
dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
return 0;
@@ -1260,31 +1293,45 @@
type, thread);
if (map == NULL)
- goto out_problem;
+ goto out_problem_map;
thread__insert_map(thread, map);
+ thread__put(thread);
return 0;
+out_problem_map:
+ thread__put(thread);
out_problem:
dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
return 0;
}
-void machine__remove_thread(struct machine *machine, struct thread *th)
+static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
if (machine->last_match == th)
thread__zput(machine->last_match);
+ BUG_ON(atomic_read(&th->refcnt) == 0);
+ if (lock)
+ pthread_rwlock_wrlock(&machine->threads_lock);
rb_erase(&th->rb_node, &machine->threads);
+ RB_CLEAR_NODE(&th->rb_node);
/*
* Move it first to the dead_threads list, then drop the reference,
* if this is the last reference, then the thread__delete destructor
* will be called and we will remove it from the dead_threads list.
*/
list_add_tail(&th->node, &machine->dead_threads);
+ if (lock)
+ pthread_rwlock_unlock(&machine->threads_lock);
thread__put(th);
}
+void machine__remove_thread(struct machine *machine, struct thread *th)
+{
+ return __machine__remove_thread(machine, th, true);
+}
+
int machine__process_fork_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample)
{
@@ -1294,10 +1341,13 @@
struct thread *parent = machine__findnew_thread(machine,
event->fork.ppid,
event->fork.ptid);
+ int err = 0;
/* if a thread currently exists for the thread id remove it */
- if (thread != NULL)
+ if (thread != NULL) {
machine__remove_thread(machine, thread);
+ thread__put(thread);
+ }
thread = machine__findnew_thread(machine, event->fork.pid,
event->fork.tid);
@@ -1307,10 +1357,12 @@
if (thread == NULL || parent == NULL ||
thread__fork(thread, parent, sample->time) < 0) {
dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
- return -1;
+ err = -1;
}
+ thread__put(thread);
+ thread__put(parent);
- return 0;
+ return err;
}
int machine__process_exit_event(struct machine *machine, union perf_event *event,
@@ -1323,8 +1375,10 @@
if (dump_trace)
perf_event__fprintf_task(event, stdout);
- if (thread != NULL)
+ if (thread != NULL) {
thread__exited(thread);
+ thread__put(thread);
+ }
return 0;
}
@@ -1841,6 +1895,7 @@
return -ENOMEM;
thread->cpu = cpu;
+ thread__put(thread);
return 0;
}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 1d99296..c7963c6 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -30,6 +30,7 @@
bool comm_exec;
char *root_dir;
struct rb_root threads;
+ pthread_rwlock_t threads_lock;
struct list_head dead_threads;
struct thread *last_match;
struct vdso_info *vdso_info;
@@ -151,8 +152,8 @@
return machine ? machine->pid == HOST_KERNEL_ID : false;
}
-struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
- pid_t tid);
+struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
+struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
size_t machine__fprintf(struct machine *machine, FILE *fp);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 1b26552..16c28a3 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -18,7 +18,7 @@
if (pid == thread->tid || pid == -1) {
thread->mg = map_groups__new(machine);
} else {
- leader = machine__findnew_thread(machine, pid, pid);
+ leader = __machine__findnew_thread(machine, pid, pid);
if (leader)
thread->mg = map_groups__get(leader->mg);
}
@@ -54,6 +54,8 @@
list_add(&comm->list, &thread->comm_list);
atomic_set(&thread->refcnt, 0);
+ INIT_LIST_HEAD(&thread->node);
+ RB_CLEAR_NODE(&thread->rb_node);
}
return thread;
@@ -67,6 +69,9 @@
{
struct comm *comm, *tmp;
+ BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));
+ BUG_ON(!list_empty(&thread->node));
+
thread_stack__free(thread);
if (thread->mg) {
@@ -84,7 +89,8 @@
struct thread *thread__get(struct thread *thread)
{
- atomic_inc(&thread->refcnt);
+ if (thread)
+ atomic_inc(&thread->refcnt);
return thread;
}