On Fri, May 15, 2015 at 11:08:04AM +0900, Namhyung Kim wrote:
> Hi Arnaldo,
>
> On Thu, May 14, 2015 at 10:18:27AM -0300, Arnaldo Carvalho de Melo wrote:
> > On Thu, May 14, 2015 at 05:23:30PM +0900, Namhyung Kim wrote:
> > > On Mon, May 11, 2015 at 11:06:26AM -0300, Arnaldo Carvalho de Melo wrote:
> > We need to improve this segfault backtrace, I have to always use
> > addr2line to resolve those missing entries, i.e. if you try:
> >
> > addr2line -fe /path/to/your/perf 0x4dd9c8
> > addr2line -fe /path/to/your/perf 0x4e2580
> >
> > We would have resolved those lines :-/
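For illustration, automating that addr2line step inside the crash handler could look like the sketch below. This is hypothetical code, not perf's actual handler: dump_resolved_backtrace and the 32-frame cap are made up here; only the addr2line invocation matches what is quoted above.

#include <stdio.h>
#include <stdlib.h>
#include <execinfo.h>

/* Sketch: resolve the raw return addresses of the current stack
 * through addr2line, so the crash dump prints function and file:line
 * instead of bare hex.  "self" is the running binary, e.g.
 * "/path/to/your/perf". */
static void dump_resolved_backtrace(const char *self)
{
	void *addrs[32];
	char cmd[512];
	int i, n = backtrace(addrs, 32);

	for (i = 0; i < n; i++) {
		/* Equivalent of: addr2line -fe /path/to/your/perf 0x4dd9c8 */
		snprintf(cmd, sizeof(cmd), "addr2line -fe %s %p",
			 self, addrs[i]);
		system(cmd);
	}
}

The addresses in the report (e.g. 0x4dd9c8) look unrelocated, so they can be fed to addr2line as-is; a PIE build would first need the load offset subtracted from each address.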
> Right, I'll add it to my TODO list.
>
> Anyway, this is a backtrace using gdb..

Ok, reproduced here:

[acme@ibm-x3650m4-01 linux]$ fg
gdb perf
list
134			if (verbose) {
135				dso_name_l = dso_l->long_name;
136				dso_name_r = dso_r->long_name;
137			} else {
138				dso_name_l = dso_l->short_name;
139				dso_name_r = dso_r->short_name;
140			}
141
142			return strcmp(dso_name_l, dso_name_r);
143		}
(gdb) p dso_l
$2 = (struct dso *) 0x1924ba0
(gdb)
$3 = (struct dso *) 0x1924ba0
(gdb) p dso_r
$4 = (struct dso *) 0x1
(gdb) bt
#0  0x00000000004f557b in _sort__dso_cmp (map_l=0x182ab3120, map_r=0xd5325b0) at util/sort.c:139
#1  0x00000000004f55f1 in sort__dso_cmp (left=0x606c7f0, right=0x7fffffffb850) at util/sort.c:148
#2  0x00000000004f8470 in __sort__hpp_cmp (fmt=0x1922fb0, a=0x606c7f0, b=0x7fffffffb850) at util/sort.c:1313
#3  0x00000000004fc3b8 in hist_entry__cmp (left=0x606c7f0, right=0x7fffffffb850) at util/hist.c:911
#4  0x00000000004fafcc in add_hist_entry (hists=0x1922d80, entry=0x7fffffffb850, al=0x7fffffffbbe0, sample_self=false) at util/hist.c:389
#5  0x00000000004fb350 in __hists__add_entry (hists=0x1922d80, al=0x7fffffffbbe0, sym_parent=0x0, bi=0x0, mi=0x0, period=557536, weight=0, transaction=0, sample_self=false) at util/hist.c:471
#6  0x00000000004fc03c in iter_add_next_cumulative_entry (iter=0x7fffffffbc10, al=0x7fffffffbbe0) at util/hist.c:797
#7  0x00000000004fc291 in hist_entry_iter__add (iter=0x7fffffffbc10, al=0x7fffffffbbe0, evsel=0x1922c50, sample=0x7fffffffbdf0, max_stack_depth=127, arg=0x7fffffffc810) at util/hist.c:882
#8  0x000000000042f1b7 in process_sample_event (tool=0x7fffffffc810, event=0x7ffed74b41e0, sample=0x7fffffffbdf0, evsel=0x1922c50, machine=0x19213d0) at builtin-report.c:171
#9  0x00000000004da272 in perf_evlist__deliver_sample (evlist=0x1922260, tool=0x7fffffffc810, event=0x7ffed74b41e0, sample=0x7fffffffbdf0, evsel=0x1922c50, machine=0x19213d0) at util/session.c:1000
#10 0x00000000004da40c in machines__deliver_event (machines=0x19213d0, evlist=0x1922260, event=0x7ffed74b41e0, sample=0x7fffffffbdf0, tool=0x7fffffffc810, file_offset=1097646560) at util/session.c:1037
#11 0x00000000004da659 in perf_session__deliver_event (session=0x1921310, event=0x7ffed74b41e0, sample=0x7fffffffbdf0, tool=0x7fffffffc810, file_offset=1097646560) at util/session.c:1082
#12 0x00000000004d7d7b in ordered_events__deliver_event (oe=0x1921558, event=0x2050430) at util/session.c:109
#13 0x00000000004dd65b in __ordered_events__flush (oe=0x1921558) at util/ordered-events.c:207
#14 0x00000000004dd92f in ordered_events__flush (oe=0x1921558, how=OE_FLUSH__ROUND) at util/ordered-events.c:271
#15 0x00000000004d94c8 in process_finished_round (tool=0x7fffffffc810, event=0x7ffed74c6830, oe=0x1921558) at util/session.c:663
#16 0x00000000004da7cd in perf_session__process_user_event (session=0x1921310, event=0x7ffed74c6830, file_offset=1097721904) at util/session.c:1119
#17 0x00000000004daced in perf_session__process_event (session=0x1921310, event=0x7ffed74c6830, file_offset=1097721904) at util/session.c:1232
#18 0x00000000004db811 in __perf_session__process_events (session=0x1921310, data_offset=232, data_size=5774474704, file_size=5774474936) at util/session.c:1533
#19 0x00000000004dba01 in perf_session__process_events (session=0x1921310) at util/session.c:1580
#20 0x000000000042ff9f in __cmd_report (rep=0x7fffffffc810) at builtin-report.c:487
#21 0x00000000004315d9 in cmd_report (argc=0, argv=0x7fffffffddd0, prefix=0x0) at builtin-report.c:878
#22 0x0000000000490fb8 in run_builtin (p=0x886528 <commands+168>, argc=1, argv=0x7fffffffddd0) at perf.c:370
#23 0x0000000000491217 in handle_internal_command (argc=1, argv=0x7fffffffddd0) at perf.c:429
#24 0x0000000000491363 in run_argv (argcp=0x7fffffffdc2c, argv=0x7fffffffdc20) at perf.c:473
#25 0x00000000004916c4 in main (argc=1, argv=0x7fffffffddd0) at perf.c:588
(gdb)

Looking at the frame #1 I see:

(gdb) p left->hists
$22 = (struct hists *) 0x1922d80
(gdb) p right->hists
$23 = (struct hists *) 0x1922d80
(gdb)

I.e. both look like fine hist_entry instances, both are on the same
struct hists, but:

(gdb) p right->ms.map->dso
$25 = (struct dso *) 0x1924ba0
(gdb) p right->ms.ma
There is no member named ma.
(gdb) p right->ms.map
$26 = (struct map *) 0x182ab3120
(gdb) p right->ms.map->dso
$27 = (struct dso *) 0x1924ba0
(gdb) p right->ms.map->dso->name
$28 = 0x1924cc0 "/usr/lib64/libc-2.18.so"
(gdb) p left->ms.map
$29 = (struct map *) 0xd5325b0
(gdb) p left->ms.map->dso
$30 = (struct dso *) 0x1
(gdb) p *left->ms.map
$31 = {{rb_node = {__rb_parent_color = 0, rb_right = 0x118cdc3c8, rb_left = 0x0},
    node = {next = 0x0, prev = 0x118cdc3c8}}, start = 0, end = 0,
  type = 71 'G', referenced = 138, erange_warned = 8, priv = 0, prot = 0,
  flags = 0, pgoff = 81, reloc = 559687, maj = 0, min = 0, ino = 559687,
  ino_generation = 0, map_ip = 0x0, unmap_ip = 0x0, dso = 0x1, groups = 0xd5302a0}
(gdb)

Will continue investigating later...

- Arnaldo
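The $31 dump reads like recycled heap memory: dso = 0x1 plus garbage in type, referenced and pgoff, which fits the use-after-free theory quoted below. One purely hypothetical debugging aid (map__delete_debug and DSO_POISON do not exist in perf) would be to poison the pointer on the free path, so a stale map faults on an unmistakable address instead of on whatever the allocator recycled:

#include <stdlib.h>

struct dso;				/* perf's util/dso.h */
struct map { struct dso *dso; };	/* reduced to the field of interest */

#define DSO_POISON ((struct dso *)0xdead000000000001UL)

static void map__delete_debug(struct map *map)
{
	/* Any later map->dso dereference now faults on a value that is
	 * unmistakably a freed map, rather than on 0x1 or on
	 * valid-looking recycled data. */
	map->dso = DSO_POISON;
	free(map);
}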
> Program received signal SIGSEGV, Segmentation fault.
> 0x00007ffff5fb229e in __strcmp_sse2_unaligned () from /usr/lib/libc.so.6
> (gdb) bt
> #0  0x00007ffff5fb229e in __strcmp_sse2_unaligned () from /usr/lib/libc.so.6
> #1  0x00000000004d3948 in _sort__dso_cmp (map_r=<optimized out>, map_l=<optimized out>) at util/sort.c:142
> #2  sort__dso_cmp (left=<optimized out>, right=<optimized out>) at util/sort.c:148
> #3  0x00000000004d7f08 in hist_entry__cmp (right=0x7fffffffc530, left=0x323a27f0) at util/hist.c:911
> #4  add_hist_entry (sample_self=true, al=0x7fffffffc710, entry=0x7fffffffc530, hists=0x18f6690) at util/hist.c:389
> #5  __hists__add_entry (hists=0x18f6690, al=0x7fffffffc710, sym_parent=<optimized out>, bi=bi@entry=0x0, mi=mi@entry=0x0, period=<optimized out>, weight=0, transaction=0, sample_self=true) at util/hist.c:471
> #6  0x00000000004d8234 in iter_add_single_normal_entry (iter=0x7fffffffc740, al=<optimized out>) at util/hist.c:662
> #7  0x00000000004d8765 in hist_entry_iter__add (iter=0x7fffffffc740, al=0x7fffffffc710, evsel=0x18f6550, sample=<optimized out>, max_stack_depth=<optimized out>, arg=0x7fffffffd0a0) at util/hist.c:871
> #8  0x0000000000436353 in process_sample_event (tool=0x7fffffffd0a0, event=<optimized out>, sample=0x7fffffffc870, evsel=0x18f6550, machine=<optimized out>) at builtin-report.c:171
> #9  0x00000000004bbe23 in perf_evlist__deliver_sample (machine=0x18f4cc0, evsel=0x18f6550, sample=0x7fffffffc870, event=0x7fffe0bd3220, tool=0x7fffffffd0a0, evlist=0x18f5b50) at util/session.c:972
> #10 machines__deliver_event (machines=machines@entry=0x18f4cc0, evlist=<optimized out>, event=event@entry=0x7fffe0bd3220, sample=sample@entry=0x7fffffffc870, tool=tool@entry=0x7fffffffd0a0, file_offset=file_offset@entry=1821434400) at util/session.c:1009
> #11 0x00000000004bc681 in perf_session__deliver_event (file_offset=1821434400, tool=0x7fffffffd0a0, sample=0x7fffffffc870, event=0x7fffe0bd3220, session=<optimized out>) at util/session.c:1050
> #12 ordered_events__deliver_event (oe=0x18f4e00, event=<optimized out>) at util/session.c:109
> #13 0x00000000004bf12b in __ordered_events__flush (oe=0x18f4e00) at util/ordered-events.c:207
> #14 ordered_events__flush (oe=0x18f4e00, how=OE_FLUSH__ROUND) at util/ordered-events.c:271
> #15 0x00000000004bc8aa in perf_session__process_user_event (file_offset=1821919744, event=0x7fffe0c49a00, session=0x18f4c00) at util/session.c:1087
> #16 perf_session__process_event (session=session@entry=0x18f4c00, event=event@entry=0x7fffe0c49a00, file_offset=file_offset@entry=1821919744) at util/session.c:1200
> #17 0x00000000004bdcd0 in __perf_session__process_events (file_size=2160054608, data_size=<optimized out>, data_offset=<optimized out>, session=0x18f4c00) at util/session.c:1501
> #18 perf_session__process_events (session=session@entry=0x18f4c00) at util/session.c:1548
> #19 0x0000000000437609 in __cmd_report (rep=0x7fffffffd0a0) at builtin-report.c:487
> #20 cmd_report (argc=0, argv=0x7fffffffe8d0, prefix=<optimized out>) at builtin-report.c:878
> #21 0x000000000047de33 in run_builtin (p=p@entry=0x85a6e8 <commands+168>, argc=argc@entry=6, argv=argv@entry=0x7fffffffe8d0) at perf.c:370
> #22 0x000000000042893a in handle_internal_command (argv=0x7fffffffe8d0, argc=6) at perf.c:429
> #23 run_argv (argv=0x7fffffffe660, argcp=0x7fffffffe66c) at perf.c:473
> #24 main (argc=6, argv=0x7fffffffe8d0) at perf.c:588
>
> >
> > But I think this is a longstanding bug in handling hist_entries, i.e.
> > probably we have more than one pointer to a hist_entry and are accessing
> > it in two places at the same time, with one of them deleting it and
> > possibly reusing the data.
> >
> > > perf: Segmentation fault
> > > -------- backtrace --------
> > > perf[0x51c7cb]
> > > /usr/lib/libc.so.6(+0x33540)[0x7f37eb37e540]
> > > /usr/lib/libc.so.6(+0x9029e)[0x7f37eb3db29e]
> > > perf[0x4dd9c8]
> > > perf(__hists__add_entry+0x188)[0x4e2258]
> > > perf[0x4e2580]
> > > perf(hist_entry_iter__add+0x9d)[0x4e2a7d]
> > > perf[0x437fda]
> > > perf[0x4c4c8e]
> > > perf[0x4c5176]
> > > perf[0x4c8bab]
> > > perf[0x4c53c2]
> > > perf[0x4c5f0c]
> > > perf(perf_session__process_events+0xb3)[0x4c6b23]
> > > perf(cmd_report+0x12a0)[0x439310]
> > > perf[0x483ec3]
> > > perf(main+0x60a)[0x42979a]
> > > /usr/lib/libc.so.6(__libc_start_main+0xf0)[0x7f37eb36b800]
> > > perf(_start+0x29)[0x4298b9]
> > > [0x0]
> > >
> > > It seems like some memory area was corrupted..
> > Right, looks like use after free, for instance, freeing something still
> > on a list or rbtree :-/
> >
> > - Arnaldo
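To make the suspected pattern concrete, here is a minimal sketch of the ordering bug being described, with made-up types (struct entry, entry__delete; this is not the perf code path). Freeing an object while its node is still linked in an rbtree leaves the tree holding a dangling pointer, which is exactly what a later hist_entry__cmp()-style walk would trip over:

#include <stdlib.h>
#include <linux/rbtree.h>	/* tools/include/linux/rbtree.h */

struct entry {
	struct rb_node rb_node;
	char *name;
};

/* Buggy: the tree still points at e after free(), so the next walk
 * compares against freed, possibly reused, memory -- compare the
 * dso = 0x1 map in the gdb session above. */
static void entry__delete_buggy(struct rb_root *root, struct entry *e)
{
	free(e);
	rb_erase(&e->rb_node, root);	/* use after free */
}

/* Correct: unlink from every list/rbtree first, free last. */
static void entry__delete(struct rb_root *root, struct entry *e)
{
	rb_erase(&e->rb_node, root);
	free(e);
}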