/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "gdbsupport/rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"
#include "gdbarch.h"

/* For maintenance commands. */
#include "record-btrace.h"
#include <inttypes.h>
#include <ctype.h>
#include <algorithm>

/* Command lists for btrace maintenance commands. */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
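
/* Presumably these lists anchor the "maint btrace", "maint set|show btrace",
   and "maint set|show btrace pt" prefixes behind the btrace maintenance
   commands ("maint info btrace", "maint btrace packet-history",
   "maint btrace clear-packet-history", "maint btrace clear", and
   "maint set|show btrace pt skip-pad").  */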

/* Control whether to skip PAD packets when computing the packet history. */
static bool maint_btrace_pt_skip_pad = true;
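/* Presumably exposed to the user as "maint set|show btrace pt skip-pad".  */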
static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements. */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[btrace] " msg "\n", ##args); \
    } \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
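
/* Illustrative sketch, not part of the original sources: the do ... while (0)
   wrapper above lets DEBUG be used as a single statement in an unbraced
   conditional, e.g.

     if (tracing)                              hypothetical caller
       DEBUG ("fetching trace for %s", name);
     else
       do_something_else ();

   Without the wrapper, the macro would expand to a bare if statement, and the
   caller's trailing semicolon plus "else" would either bind the else to the
   macro's inner if or fail to compile.  */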

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL. */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();

  if (msym != NULL)
    return msym->print_name ();

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL. */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}
|
|
|
|
|
btrace: change branch trace data structure
The branch trace is represented as 3 vectors:
- a block vector
- a instruction vector
- a function vector
Each vector (except for the first) is computed from the one above.
Change this into a graph where a node represents a sequence of instructions
belonging to the same function and where we have three types of edges to connect
the function segments:
- control flow
- same function (instance)
- call stack
This allows us to navigate in the branch trace. We will need this for "record
goto" and reverse execution.
This patch introduces the data structure and computes the control flow edges.
It also introduces iterator structs to simplify iterating over the branch trace
in control-flow order.
It also fixes PR gdb/15240 since now recursive calls are handled correctly.
Fix the test that got the number of expected fib instances and also the
function numbers wrong.
The current instruction had been part of the branch trace. This will look odd
once we start support for reverse execution. Remove it. We still keep it in
the trace itself to allow extending the branch trace more easily in the future.
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (struct btrace_func_link): New.
(enum btrace_function_flag): New.
(struct btrace_inst): Rename to ...
(struct btrace_insn): ...this. Update all users.
(struct btrace_func) <ibegin, iend>: Remove.
(struct btrace_func_link): New.
(struct btrace_func): Rename to ...
(struct btrace_function): ...this. Update all users.
(struct btrace_function) <segment, flow, up, insn, insn_offset)
(number, level, flags>: New.
(struct btrace_insn_iterator): Rename to ...
(struct btrace_insn_history): ...this.
Update all users.
(struct btrace_insn_iterator, btrace_call_iterator): New.
(struct btrace_target_info) <btrace, itrace, ftrace>: Remove.
(struct btrace_target_info) <begin, end, level>
<insn_history, call_history>: New.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* btrace.c (btrace_init_insn_iterator)
(btrace_init_func_iterator, compute_itrace): Remove.
(ftrace_print_function_name, ftrace_print_filename)
(ftrace_skip_file): Change
parameter to const.
(ftrace_init_func): Remove.
(ftrace_debug): Use new btrace_function fields.
(ftrace_function_switched): Also consider gaining and
losing symbol information).
(ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return)
(ftrace_new_switch, ftrace_find_caller, ftrace_new_function)
(ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
New.
(ftrace_new_function): Move. Remove debug print.
(ftrace_update_lines, ftrace_update_insns): New.
(ftrace_update_function): Check for call, ret, and jump.
(compute_ftrace): Renamed to ...
(btrace_compute_ftrace): ...this. Rewritten to compute call
stack.
(btrace_fetch, btrace_clear): Updated.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* record-btrace.c (require_btrace): Use new btrace thread
info fields.
(record_btrace_info, btrace_insn_history)
(record_btrace_insn_history, record_btrace_insn_history_range):
Use new btrace thread info fields and new iterator.
(btrace_func_history_src_line): Rename to ...
(btrace_call_history_src_line): ...this. Use new btrace
thread info fields.
(btrace_func_history): Rename to ...
(btrace_call_history): ...this. Use new btrace thread info
fields and new iterator.
(record_btrace_call_history, record_btrace_call_history_range):
Use new btrace thread info fields and new iterator.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected function
trace.
* gdb.btrace/instruction_history.exp: Initialize traced.
Remove traced_functions.
2013-03-22 21:32:47 +08:00
|
|
|
/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

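/* Illustrative note, not part of the original sources: on a 64-bit target,
   core_addr_to_string_nz is expected to yield a hexadecimal string such as
   "0x4004d6" without leading zero padding, so the "<nil>" marker above only
   shows up when no instruction was supplied at all.  */
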
/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + bfun->insn.size ();

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
                prefix, fun, file, level, ibegin, iend);
}

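/* Illustrative note, not part of the original sources: given the format
   string above, a hypothetical call such as ftrace_debug (bfun, "update")
   would emit a line of the form

     update: fun = main, file = fib.c, level = 0, insn = [0; 7)

   where the function name, file name, and numbers are made-up example
   values; the actual output depends on the decoded trace.  */
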
/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function *bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return bfun->insn.size ();
}

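/* Worked example, illustrative only: a segment whose insn vector holds
   seven decoded instructions counts as 7, while a gap segment (one with a
   non-zero errcode) counts as 1 even though its insn vector is empty, so
   that a gap still occupies exactly one slot as far as this counting is
   concerned.  */
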
/* Return the function segment with the given NUMBER or NULL if no such
   segment exists.  BTINFO is the branch trace information for the current
   thread.  */

static struct btrace_function *
ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}

/* A const version of the function above.  */

static const struct btrace_function *
ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}

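/* Usage sketch, illustrative and not part of the original sources: segment
   numbers are 1-based indices into BTINFO->functions, so a caller could
   walk all function segments in trace order roughly as follows:

     for (unsigned int i = 1; i <= btinfo->functions.size (); ++i)
       {
         const struct btrace_function *bfun
           = ftrace_find_call_by_number (btinfo, i);

         gdb_assert (bfun != NULL && bfun->number == i);
       }

   The names used here mirror the surrounding code; the loop itself is a
   hypothetical example.  */
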
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (mfun->linkage_name (), msym->linkage_name ()) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (fun->linkage_name (), sym->linkage_name ()) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));

      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

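/* Decision summary, illustrative and not part of the original sources: for
   a segment currently attributed to "foo" in foo.c, the checks above report
   a switch when the candidate symbols name a different function, name a
   "foo" defined in a different source file, or when symbol information
   appears or disappears entirely; they report no switch only while both the
   name and the file still match.  */
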
/* Allocate and initialize a new branch trace function segment at the end of
   the trace.

   BTINFO is the branch trace information for the current thread.

   MFUN and FUN are the symbol information we have for this function.

   This invalidates all struct btrace_function pointers currently held.  */

static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  int level;
  unsigned int number, insn_offset;
|
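  /* Compute the new segment's NUMBER, INSN_OFFSET, and LEVEL, either starting
     the count or continuing from the most recent segment.  */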
  if (btinfo->functions.empty ())
    {
      /* Start counting NUMBER and INSN_OFFSET at one.  */
      level = 0;
      number = 1;
      insn_offset = 1;
    }
  else
    {
      const struct btrace_function *prev = &btinfo->functions.back ();
      level = prev->level;
      number = prev->number + 1;
      insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
    }

  btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
  return &btinfo->functions.back ();
}
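
/* Illustrative usage sketch, not taken from the original sources: a trace
   decoder that just entered a new function could append a segment and record
   its caller edge roughly like this:

     struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
     ftrace_update_caller (bfun, caller, flags);

   Here BTINFO, MFUN, FUN, CALLER, and FLAGS stand for whatever the real call
   sites (ftrace_new_call, ftrace_new_return, and friends) pass in.  */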

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      btrace_function_flags flags)
{
  if (bfun->up != 0)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller->number;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_thread_info *btinfo,
                     struct btrace_function *bfun,
                     struct btrace_function *caller,
                     btrace_function_flags flags)
btrace: change branch trace data structure
The branch trace is represented as 3 vectors:
- a block vector
- a instruction vector
- a function vector
Each vector (except for the first) is computed from the one above.
Change this into a graph where a node represents a sequence of instructions
belonging to the same function and where we have three types of edges to connect
the function segments:
- control flow
- same function (instance)
- call stack
This allows us to navigate in the branch trace. We will need this for "record
goto" and reverse execution.
This patch introduces the data structure and computes the control flow edges.
It also introduces iterator structs to simplify iterating over the branch trace
in control-flow order.
It also fixes PR gdb/15240 since now recursive calls are handled correctly.
Fix the test that got the number of expected fib instances and also the
function numbers wrong.
The current instruction had been part of the branch trace. This will look odd
once we start support for reverse execution. Remove it. We still keep it in
the trace itself to allow extending the branch trace more easily in the future.
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (struct btrace_func_link): New.
(enum btrace_function_flag): New.
(struct btrace_inst): Rename to ...
(struct btrace_insn): ...this. Update all users.
(struct btrace_func) <ibegin, iend>: Remove.
(struct btrace_func_link): New.
(struct btrace_func): Rename to ...
(struct btrace_function): ...this. Update all users.
(struct btrace_function) <segment, flow, up, insn, insn_offset,
number, level, flags>: New.
(struct btrace_insn_iterator): Rename to ...
(struct btrace_insn_history): ...this.
Update all users.
(struct btrace_insn_iterator, btrace_call_iterator): New.
(struct btrace_target_info) <btrace, itrace, ftrace>: Remove.
(struct btrace_target_info) <begin, end, level>
<insn_history, call_history>: New.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* btrace.c (btrace_init_insn_iterator)
(btrace_init_func_iterator, compute_itrace): Remove.
(ftrace_print_function_name, ftrace_print_filename)
(ftrace_skip_file): Change parameter to const.
(ftrace_init_func): Remove.
(ftrace_debug): Use new btrace_function fields.
(ftrace_function_switched): Also consider gaining and
losing symbol information.
(ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return)
(ftrace_new_switch, ftrace_find_caller, ftrace_new_function)
(ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
New.
(ftrace_new_function): Move. Remove debug print.
(ftrace_update_lines, ftrace_update_insns): New.
(ftrace_update_function): Check for call, ret, and jump.
(compute_ftrace): Renamed to ...
(btrace_compute_ftrace): ...this. Rewritten to compute call
stack.
(btrace_fetch, btrace_clear): Updated.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* record-btrace.c (require_btrace): Use new btrace thread
info fields.
(record_btrace_info, btrace_insn_history)
(record_btrace_insn_history, record_btrace_insn_history_range):
Use new btrace thread info fields and new iterator.
(btrace_func_history_src_line): Rename to ...
(btrace_call_history_src_line): ...this. Use new btrace
thread info fields.
(btrace_func_history): Rename to ...
(btrace_call_history): ...this. Use new btrace thread info
fields and new iterator.
(record_btrace_call_history, record_btrace_call_history_range):
Use new btrace thread info fields and new iterator.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected function
trace.
* gdb.btrace/instruction_history.exp: Initialize traced.
Remove traced_functions.
2013-03-22 21:32:47 +08:00
|
|
|
{
|
2017-05-30 18:47:37 +08:00
|
|
|
unsigned int prev, next;
|
2013-03-22 21:32:47 +08:00
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
prev = bfun->prev;
|
|
|
|
next = bfun->next;
|
2013-03-22 21:32:47 +08:00
|
|
|
ftrace_update_caller (bfun, caller, flags);
|
|
|
|
|
|
|
|
/* Update all function segments belonging to the same function. */
|
2017-05-30 18:47:37 +08:00
|
|
|
for (; prev != 0; prev = bfun->prev)
|
|
|
|
{
|
|
|
|
bfun = ftrace_find_call_by_number (btinfo, prev);
|
|
|
|
ftrace_update_caller (bfun, caller, flags);
|
|
|
|
}
|
2013-03-22 21:32:47 +08:00
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
for (; next != 0; next = bfun->next)
|
|
|
|
{
|
|
|
|
bfun = ftrace_find_call_by_number (btinfo, next);
|
|
|
|
ftrace_update_caller (bfun, caller, flags);
|
|
|
|
}
|
2013-03-22 21:32:47 +08:00
|
|
|
}
|
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
/* Add a new function segment for a call at the end of the trace.
|
2017-05-30 18:47:37 +08:00
|
|
|
BTINFO is the branch trace information for the current thread.
|
2013-03-22 21:32:47 +08:00
|
|
|
MFUN and FUN are the symbol information we have for this function. */
|
|
|
|
|
|
|
|
static struct btrace_function *
|
2017-05-30 18:47:37 +08:00
|
|
|
ftrace_new_call (struct btrace_thread_info *btinfo,
|
2013-03-22 21:32:47 +08:00
|
|
|
struct minimal_symbol *mfun,
|
|
|
|
struct symbol *fun)
|
|
|
|
{
|
2017-05-30 18:47:37 +08:00
|
|
|
const unsigned int length = btinfo->functions.size ();
|
2017-05-30 18:47:37 +08:00
|
|
|
struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
|
2013-03-22 21:32:47 +08:00
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
bfun->up = length;
|
record-btrace: indicate gaps
Indicate gaps in the trace due to decode errors. Internally, a gap is
represented as a btrace function segment without instructions and with a
non-zero format-specific error code.
Show the gap when traversing the instruction or function call history.
Also indicate gaps in "info record".
It looks like this:
(gdb) info record
Active record target: record-btrace
Recording format: Branch Trace Store.
Buffer size: 64KB.
Recorded 32 instructions in 5 functions (1 gaps) for thread 1 (process 7182).
(gdb) record function-call-history /cli
1 fib inst 1,9 at src/fib.c:9,14
2 fib inst 10,20 at src/fib.c:6,14
3 [decode error (1): instruction overflow]
4 fib inst 21,28 at src/fib.c:11,14
5 fib inst 29,33 at src/fib.c:6,9
(gdb) record instruction-history 20,22
20 0x000000000040062f <fib+47>: sub $0x1,%rax
[decode error (1): instruction overflow]
21 0x0000000000400613 <fib+19>: add $0x1,%rax
22 0x0000000000400617 <fib+23>: mov %rax,0x200a3a(%rip)
(gdb)
Gaps are ignored during reverse execution and replay.
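A minimal sketch of the gap representation described above, using hypothetical names rather than the actual struct btrace_function fields in btrace.h:
struct bt_insn;
/* A gap is a function segment that carries no instructions and records a
   non-zero, format-specific decode error instead.  */
struct bt_segment
{
  struct bt_insn *insn;     /* Instruction array; empty for a gap.  */
  unsigned int insn_count;  /* Zero for a gap.  */
  int errcode;              /* Non-zero decode error code for a gap.  */
};
static int
bt_segment_is_gap (const struct bt_segment *seg)
{
  return seg->insn_count == 0 && seg->errcode != 0;
}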
2015-02-09 Markus Metzger <markus.t.metzger@intel.com>
* btrace.c (ftrace_find_call): Skip gaps.
(ftrace_new_function): Initialize level.
(ftrace_new_call, ftrace_new_tailcall, ftrace_new_return)
(ftrace_new_switch): Update level computation.
(ftrace_new_gap): New.
(ftrace_update_function): Create new function after gap.
(btrace_compute_ftrace_bts): Create gap on error.
(btrace_stitch_bts): Update parameters. Clear trace if it
becomes empty.
(btrace_stitch_trace): Update parameters. Update callers.
(btrace_clear): Reset the number of gaps.
(btrace_insn_get): Return NULL if the iterator points to a gap.
(btrace_insn_number): Return zero if the iterator points to a gap.
(btrace_insn_end): Allow gaps at the end.
(btrace_insn_next, btrace_insn_prev, btrace_insn_cmp): Handle gaps.
(btrace_find_insn_by_number): Assert that the found iterator does
not point to a gap.
(btrace_call_next, btrace_call_prev): Assert that the last function
is not a gap.
* btrace.h (btrace_bts_error): New.
(btrace_function): Update comment.
(btrace_function) <insn, insn_offset, number>: Update comment.
(btrace_function) <errcode>: New.
(btrace_thread_info) <ngaps>: New.
(btrace_thread_info) <replay>: Update comment.
(btrace_insn_get): Update comment.
* record-btrace.c (btrace_ui_out_decode_error): New.
(record_btrace_info): Print number of gaps.
(btrace_insn_history, btrace_call_history): Call
btrace_ui_out_decode_error for gaps.
(record_btrace_step_thread, record_btrace_start_replaying): Skip gaps.
testsuite/
* gdb.btrace/buffer-size.exp: Update "info record" output.
* gdb.btrace/delta.exp: Update "info record" output.
* gdb.btrace/enable.exp: Update "info record" output.
* gdb.btrace/finish.exp: Update "info record" output.
* gdb.btrace/instruction_history.exp: Update "info record" output.
* gdb.btrace/next.exp: Update "info record" output.
* gdb.btrace/nexti.exp: Update "info record" output.
* gdb.btrace/step.exp: Update "info record" output.
* gdb.btrace/stepi.exp: Update "info record" output.
* gdb.btrace/nohist.exp: Update "info record" output.
2014-01-30 16:51:10 +08:00
|
|
|
bfun->level += 1;
|
2013-03-22 21:32:47 +08:00
|
|
|
|
|
|
|
ftrace_debug (bfun, "new call");
|
|
|
|
|
|
|
|
return bfun;
|
|
|
|
}
|
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
/* Add a new function segment for a tail call at the end of the trace.
|
2017-05-30 18:47:37 +08:00
|
|
|
BTINFO is the branch trace information for the current thread.
|
2013-03-22 21:32:47 +08:00
|
|
|
MFUN and FUN are the symbol information we have for this function. */
|
|
|
|
|
|
|
|
static struct btrace_function *
|
2017-05-30 18:47:37 +08:00
|
|
|
ftrace_new_tailcall (struct btrace_thread_info *btinfo,
|
2013-03-22 21:32:47 +08:00
|
|
|
struct minimal_symbol *mfun,
|
|
|
|
struct symbol *fun)
|
|
|
|
{
|
2017-05-30 18:47:37 +08:00
|
|
|
const unsigned int length = btinfo->functions.size ();
|
2017-05-30 18:47:37 +08:00
|
|
|
struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
|
2013-03-11 16:17:08 +08:00
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
bfun->up = length;
|
2014-01-30 16:51:10 +08:00
|
|
|
bfun->level += 1;
|
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

btrace: bridge gaps
Most of the time, the trace should be in one piece. This case is handled fine
by GDB. In some cases, however, there may be gaps in the trace. They result
from trace decode errors or from overflows.
A gap in the trace means we lost an unknown amount of trace. Gaps can be very
small, such as a few instructions in the same function, or they can be rather
big. We may, for example, lose a few function calls or returns. The trace may
continue in a different function and we likely don't know how we got there.
Even though we can't say how the program executed across a gap, higher levels
may not be impacted too much by it. Let's assume we have functions a-e and a
trace that looks roughly like this:
   a
    \
     b           b
      \         /
       c <gap> c
              /
        d    d
         \  /
          e
Even though we can't say for sure, it is likely that b and c are the same
function instance before and after the gap. This patch is trying to connect
the c and b function segments across the gap.
This will add a to the back trace of b on the right hand side. The changes are
reflected in GDB's internal representation of the trace and will improve:
- the output of "record function-call-history /c"
- the output of "backtrace" in replay mode
- source stepping in replay mode
will be improved indirectly via the improved back trace
I don't have an automated test for this patch; decode errors will be fixed and
overflows occur sporadically and are quite rare. I tested it by hacking GDB to
provoke a decode error and on the expected gap in the gdb.btrace/dlopen.exp
test.
The issue is that we can't predict where we will be able to re-sync in case of
errors. For the expected decode error in gdb.btrace/dlopen.exp, for example, we
may be able to re-sync somewhere in dlclose, in test, in main, or not at all.
Here's one example run of gdb.btrace/dlopen.exp with and without this patch.
(gdb) info record
Active record target: record-btrace
Recording format: Intel Processor Trace.
Buffer size: 16kB.
warning: Non-contiguous trace at instruction 66608 (offset = 0xa83, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66652 (offset = 0xa9b, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66770 (offset = 0xacb, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66966 (offset = 0xb60, pc = 0xb7ff5ee4).
warning: Non-contiguous trace at instruction 66994 (offset = 0xb74, pc = 0xb7ff5f24).
warning: Non-contiguous trace at instruction 67334 (offset = 0xbac, pc = 0xb7ff5e6d).
warning: Non-contiguous trace at instruction 69022 (offset = 0xc04, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69116 (offset = 0xc1c, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69504 (offset = 0xc74, pc = 0xb7ff605d).
warning: Non-contiguous trace at instruction 83648 (offset = 0xecc, pc = 0xb7ff6134).
warning: Decode error (-13) at instruction 83876 (offset = 0xf48, pc = 0xb7fd6380): no memory mapped at this address.
warning: Non-contiguous trace at instruction 83876 (offset = 0x11b7, pc = 0xb7ff1c70).
Recorded 83948 instructions in 912 functions (12 gaps) for thread 1 (process 12996).
(gdb) record instruction-history 83876, +2
83876 => 0xb7fec46f <call_init.part.0+95>: call *%eax
[decode error (-13): no memory mapped at this address]
[disabled]
83877 0xb7ff1c70 <_dl_close_worker.part.0+1584>: nop
Without the patch, the trace is disconnected and the backtrace is short:
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
Backtrace stopped: not enough registers or memory available to unwind further
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
With the patch, GDB is able to connect the trace pieces and we get a full
backtrace.
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7ff02e2 in _dl_open () from /lib/ld-linux.so.2
#5 0xb7fc3c65 in dlopen_doit () from /lib/libdl.so.2
#6 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#7 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#8 0xb7fc3d0e in dlopen@@GLIBC_2.1 () from /lib/libdl.so.2
#9 0xb7ff28ee in _dl_runtime_resolve () from /lib/ld-linux.so.2
#10 0x0804841c in ?? ()
#11 0x08048470 in dlopen@plt ()
#12 0x080485a3 in test ()
#13 0x08048628 in main ()
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
It worked nicely in this case but it may, of course, also lead to weird
connections; it is a heuristic, after all.
It works best when the gap is small and the trace pieces are long.
gdb/
* btrace.c (bfun_s): New typedef.
(ftrace_update_caller): Print caller in debug dump.
(ftrace_get_caller, ftrace_match_backtrace, ftrace_fixup_level)
(ftrace_compute_global_level_offset, ftrace_connect_bfun)
(ftrace_connect_backtrace, ftrace_bridge_gap, btrace_bridge_gaps): New.
(btrace_compute_ftrace_bts): Pass vector of gaps. Collect gaps.
(btrace_compute_ftrace_pt): Likewise.
(btrace_compute_ftrace): Split into this, ...
(btrace_compute_ftrace_1): ... this, and ...
(btrace_finalize_ftrace): ... this. Call btrace_bridge_gaps.
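
The heuristic boils down to scoring candidate pairings by how many levels of their back traces name the same function and then linking the best match. The stand-alone sketch below illustrates only that scoring step; it uses a simplified segment structure instead of GDB's btrace_function, and the names (struct segment, match_backtrace, the a/b/c example) are illustrative, not taken from btrace.c.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for a function segment: NAME identifies the
   function, UP is the caller's segment or NULL if unknown.  */
struct segment
{
  const char *name;
  struct segment *up;
};

/* Count how many levels of the two back traces name the same function.
   A longer match makes it more likely that LHS (before the gap) and
   RHS (after the gap) belong to the same function instance.  */
static int
match_backtrace (const struct segment *lhs, const struct segment *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; lhs = lhs->up, rhs = rhs->up)
    {
      if (strcmp (lhs->name, rhs->name) != 0)
        break;

      matches += 1;
    }

  return matches;
}

int
main (void)
{
  /* Left of the gap: a calls b calls c.  */
  struct segment a = { "a", NULL };
  struct segment b_left = { "b", &a };
  struct segment c_left = { "c", &b_left };

  /* Right of the gap: c returns to b; b's caller is unknown.  */
  struct segment b_right = { "b", NULL };
  struct segment c_right = { "c", &b_right };

  printf ("c/c score: %d\n", match_backtrace (&c_left, &c_right));
  printf ("c/b score: %d\n", match_backtrace (&c_left, &b_right));

  /* Bridging the gap at the best match gives the right-hand b a caller
     again, so "backtrace" can unwind past the gap.  */
  b_right.up = &a;
  printf ("caller of b after bridging: %s\n", b_right.up->name);

  return 0;
}

The real patch presumably does this work on the trace data itself via the ftrace_match_backtrace, ftrace_connect_bfun, and ftrace_fixup_level routines named in the ChangeLog above, which also take care of fixing up levels and links; the sketch leaves all of that out.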

/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_get_caller (struct btrace_thread_info *btinfo,
                   struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return ftrace_find_call_by_number (btinfo, bfun->up);

  return NULL;
}

btrace: change branch trace data structure
The branch trace is represented as 3 vectors:
- a block vector
- an instruction vector
- a function vector
Each vector (except for the first) is computed from the one above.
Change this into a graph where a node represents a sequence of instructions
belonging to the same function and where we have three types of edges to connect
the function segments:
- control flow
- same function (instance)
- call stack
This allows us to navigate in the branch trace. We will need this for "record
goto" and reverse execution.
This patch introduces the data structure and computes the control flow edges.
It also introduces iterator structs to simplify iterating over the branch trace
in control-flow order.
It also fixes PR gdb/15240 since now recursive calls are handled correctly.
Fix the test that got the number of expected fib instances and also the
function numbers wrong.
The current instruction had been part of the branch trace. This will look odd
once we start supporting reverse execution. Remove it. We still keep it in
the trace itself to allow extending the branch trace more easily in the future.
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (struct btrace_func_link): New.
(enum btrace_function_flag): New.
(struct btrace_inst): Rename to ...
(struct btrace_insn): ...this. Update all users.
(struct btrace_func) <ibegin, iend>: Remove.
(struct btrace_func_link): New.
(struct btrace_func): Rename to ...
(struct btrace_function): ...this. Update all users.
(struct btrace_function) <segment, flow, up, insn, insn_offset,
number, level, flags>: New.
(struct btrace_insn_iterator): Rename to ...
(struct btrace_insn_history): ...this.
Update all users.
(struct btrace_insn_iterator, btrace_call_iterator): New.
(struct btrace_target_info) <btrace, itrace, ftrace>: Remove.
(struct btrace_target_info) <begin, end, level>
<insn_history, call_history>: New.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* btrace.c (btrace_init_insn_iterator)
(btrace_init_func_iterator, compute_itrace): Remove.
(ftrace_print_function_name, ftrace_print_filename)
(ftrace_skip_file): Change parameter to const.
(ftrace_init_func): Remove.
(ftrace_debug): Use new btrace_function fields.
(ftrace_function_switched): Also consider gaining and
losing symbol information.
(ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return)
(ftrace_new_switch, ftrace_find_caller, ftrace_new_function)
(ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
New.
(ftrace_new_function): Move. Remove debug print.
(ftrace_update_lines, ftrace_update_insns): New.
(ftrace_update_function): Check for call, ret, and jump.
(compute_ftrace): Renamed to ...
(btrace_compute_ftrace): ...this. Rewritten to compute call
stack.
(btrace_fetch, btrace_clear): Updated.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* record-btrace.c (require_btrace): Use new btrace thread
info fields.
(record_btrace_info, btrace_insn_history)
(record_btrace_insn_history, record_btrace_insn_history_range):
Use new btrace thread info fields and new iterator.
(btrace_func_history_src_line): Rename to ...
(btrace_call_history_src_line): ...this. Use new btrace
thread info fields.
(btrace_func_history): Rename to ...
(btrace_call_history): ...this. Use new btrace thread info
fields and new iterator.
(record_btrace_call_history, record_btrace_call_history_range):
Use new btrace thread info fields and new iterator.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected function
trace.
* gdb.btrace/instruction_history.exp: Initialize traced.
Remove traced_functions.
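
As a rough illustration of the graph described above, a function-segment node could carry the three kinds of edges along the lines of the sketch below. This is illustrative only, not the declaration from btrace.h; it picks up a few of the fields named in the ChangeLog (flow, segment, up, insn_offset, number, level), and the prev/next members inside the link struct are an assumption.

/* Illustrative sketch only; not the actual declaration from btrace.h.  */

struct func_link
{
  struct func_node *prev;
  struct func_node *next;
};

struct func_node
{
  /* Control-flow edges: the segments executed immediately before and
     after this one, in trace order.  */
  struct func_link flow;

  /* Same-function edges: the previous and next segment belonging to
     the same function instance, e.g. on either side of a call.  */
  struct func_link segment;

  /* Call-stack edge: the caller's segment, if known.  */
  struct func_node *up;

  /* Offset of the segment's first instruction in the instruction
     history, the segment's own number, and its depth in the
     reconstructed call stack.  */
  unsigned int insn_offset;
  unsigned int number;
  int level;
};

The iterator types added by the same patch (btrace_insn_iterator, btrace_call_iterator) then walk these edges to traverse the trace in control-flow order.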

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  BTINFO is the branch trace information for the current
   thread.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_thread_info *btinfo,
                    struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
2013-03-22 21:32:47 +08:00
|
|
|
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}
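
/* Example (hypothetical trace): if the recorded call stack at this point is
   main -> fib -> fib and we look for the "main" segment that we return into,
   the loop above follows the UP links from the innermost caller, skips the
   "fib" segments whose symbol information does not match MFUN/FUN, and stops
   at the "main" segment.  If no matching segment exists in the back trace,
   NULL is returned.  */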

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_find_call (struct btrace_thread_info *btinfo,
                  struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      btrace_insn &last = bfun->insn.back ();

      if (last.iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}
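
/* Example (hypothetical trace): assume the back trace of a segment is
   gap -> fib -> main, where the gap has a non-zero ERRCODE, "fib" ends with
   a jump (a tail call), and "main" ends with a call instruction.  Starting
   at the gap, the loop above skips the gap and the "fib" segment and returns
   the "main" segment.  If no segment in the chain ends with a call, NULL is
   returned.  */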

/* Add a continuation segment for a function into which we return at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_thread_info *btinfo,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *prev, *bfun, *caller;

  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_call_by_number (btinfo, prev->up);
  caller = ftrace_find_caller (btinfo, caller, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->next == 0);

      caller->next = bfun->number;
      bfun->prev = caller->number;
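
      /* Example (hypothetical trace): after "main" calls "fib" and "fib"
         returns, BFUN is the new "main" segment and CALLER is the "main"
         segment that contained the call.  The NEXT/PREV links connect the
         two segments as parts of the same function instance.  */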

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call_by_number (btinfo, prev->up);
      caller = ftrace_find_call (btinfo, caller);
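      /* ftrace_find_call is expected to skip function segments that do not
         end with an actual call instruction, e.g. tail calls ending with a
         jump, so CALLER is either a segment ending with a real call or NULL.  */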
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost function and add a new caller for it.
             This should handle a series of initial tail calls.  */
          while (prev->up != 0)
            prev = ftrace_find_call_by_number (btinfo, prev->up);
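
          /* For example, if the trace started in a function that tail-called
             its way down to PREV, the up links form a chain of tail calls;
             the loop above follows that chain to its start so that BFUN can
             be installed as the caller of the whole series, one level up.  */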

          bfun->level = prev->level - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);
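          /* BFUN_UP_LINKS_TO_RET records that PREV's up link now points to
             the segment we returned to rather than to a known caller.  */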
ftrace_debug (bfun, "new return - no caller");
|
|
|
|
}
|
|
|
|
else
|
2013-03-11 16:17:08 +08:00
|
|
|
{
          /* There is a call in PREV's back trace to which we should have
             returned but didn't.  Let's start a new, separate back trace
             from PREV's level.  */
          bfun->level = prev->level - 1;

          /* We fix up the back trace for PREV but leave other function segments
             on the same level as they are.

             This should handle things like schedule () correctly where we're
             switching contexts.  */
          prev->up = bfun->number;
          prev->flags = BFUN_UP_LINKS_TO_RET;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}
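
/* A sketch of the level bookkeeping above, assuming the first traced segment
   starts at level 0 and every return in the trace is unmatched:

       segment   level
       fib           0   first segment in the trace
       fib          -1   return without a recorded call
       main         -2   another unmatched return

   Each unmatched return places the new segment one level above the returning
   one, so the most negative level marks the outermost function known to the
   trace.  */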

/* Add a new function segment for a function switch at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_thread_info *btinfo,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *prev, *bfun;
/* This is an unexplained function switch. We can't really be sure about the
|
|
|
|
call stack, yet the best I can think of right now is to preserve it. */
|
2017-05-30 18:47:37 +08:00
|
|
|
bfun = ftrace_new_function (btinfo, mfun, fun);
|
2017-05-30 18:47:37 +08:00
|
|
|
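  /* Preserve the caller and the flags of the preceding function segment.  */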
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error at
   the end of the trace.

   BTINFO is the branch trace information for the current thread.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
                std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;

  if (btinfo->functions.empty ())
    bfun = ftrace_new_function (btinfo, NULL, NULL);
  else
    {
      /* We hijack the previous function segment if it was empty.  */
      bfun = &btinfo->functions.back ();

      if (bfun->errcode != 0 || !bfun->insn.empty ())
        bfun = ftrace_new_function (btinfo, NULL, NULL);
    }

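  /* Mark the segment as a gap and remember it for later reporting.  */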
  bfun->errcode = errcode;
  gaps.push_back (bfun->number);

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update the current function segment at the end of the trace in BTINFO with
   respect to the instruction at PC.  This may create new function segments.

   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_function *bfun;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function, we create one.  */
  if (btinfo->functions.empty ())
    return ftrace_new_function (btinfo, mfun, fun);

  /* If we had a gap before, we create a function.  */
  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return ftrace_new_function (btinfo, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  btrace_insn *last = NULL;
  if (!bfun->insn.empty ())
    last = &bfun->insn.back ();

  if (last != NULL)
    {
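      /* Decide how the new function segment connects to the trace, based on
         how the last recorded instruction left the previous segment.  */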
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tailcall.
               If we treated it as return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame id's.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (btinfo, mfun, fun);

            return ftrace_new_return (btinfo, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (btinfo, mfun, fun);
|
2013-03-22 21:32:47 +08:00
|
|
|
|
2014-01-29 19:56:09 +08:00
|
|
|
case BTRACE_INSN_JUMP:
|
|
|
|
{
|
|
|
|
CORE_ADDR start;
|
2013-03-22 21:32:47 +08:00
|
|
|
|
2014-01-29 19:56:09 +08:00
|
|
|
start = get_pc_function_start (pc);
|
2013-03-22 21:32:47 +08:00
|
|
|
|
2016-01-12 23:05:15 +08:00
|
|
|
/* A jump to the start of a function is (typically) a tail call. */
|
|
|
|
if (start == pc)
|
2017-05-30 18:47:37 +08:00
|
|
|
return ftrace_new_tailcall (btinfo, mfun, fun);
|
2016-01-12 23:05:15 +08:00
|
|
|
|
2018-09-24 17:33:11 +08:00
|
|
|
/* Some versions of _Unwind_RaiseException use an indirect
|
|
|
|
jump to 'return' to the exception handler of the caller
|
|
|
|
handling the exception instead of a return. Let's restrict
|
|
|
|
this heuristic to that and related functions. */
|
|
|
|
const char *fname = ftrace_print_function_name (bfun);
|
|
|
|
if (strncmp (fname, "_Unwind_", strlen ("_Unwind_")) == 0)
|
|
|
|
{
|
|
|
|
struct btrace_function *caller
|
|
|
|
= ftrace_find_call_by_number (btinfo, bfun->up);
|
|
|
|
caller = ftrace_find_caller (btinfo, caller, mfun, fun);
|
|
|
|
if (caller != NULL)
|
|
|
|
return ftrace_new_return (btinfo, mfun, fun);
|
|
|
|
}
|
|
|
|
|
2014-01-29 19:56:09 +08:00
|
|
|
/* If we can't determine the function for PC, we treat a jump at
|
2016-01-12 23:05:15 +08:00
|
|
|
the end of the block as tail call if we're switching functions
|
|
|
|
and as an intra-function branch if we don't. */
|
|
|
|
if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
|
2017-05-30 18:47:37 +08:00
|
|
|
return ftrace_new_tailcall (btinfo, mfun, fun);
|
2016-01-12 23:05:15 +08:00
|
|
|
|
|
|
|
break;
|
2014-01-29 19:56:09 +08:00
|
|
|
}
|
2013-03-11 16:17:08 +08:00
|
|
|
}
|
2013-03-22 21:32:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Check if we're switching functions for some other reason. */
|
|
|
|
if (ftrace_function_switched (bfun, mfun, fun))
|
|
|
|
{
|
|
|
|
DEBUG_FTRACE ("switching from %s in %s at %s",
|
|
|
|
ftrace_print_insn_addr (last),
|
|
|
|
ftrace_print_function_name (bfun),
|
|
|
|
ftrace_print_filename (bfun));
|
2013-03-11 16:17:08 +08:00
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
return ftrace_new_switch (btinfo, mfun, fun);
|
2013-03-22 21:32:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return bfun;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Add the instruction at PC to BFUN's instructions. */
|
|
|
|
|
|
|
|
static void
|
btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
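A rough illustration of the container change described above, assuming btrace.h's btrace_insn type; the pre-patch VEC layout is reconstructed from the ChangeLog and is not the literal diff.

/* Before (simplified): the instruction list used gdb's old VEC container.

     typedef struct btrace_insn btrace_insn_s;
     DEF_VEC_O (btrace_insn_s);
     struct btrace_function { ... VEC (btrace_insn_s) *insn; ... };

   After (simplified): an std::vector, so the non-POD flags member of
   btrace_insn is constructed, copied, and destroyed properly.  */

#include <vector>

struct btrace_function_sketch_v
{
  /* ...other members unchanged...  */
  std::vector<btrace_insn> insn;	/* The instructions of this segment.  */
};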
2017-09-04 16:46:36 +08:00
|
|
|
ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
|
2013-03-22 21:32:47 +08:00
|
|
|
{
|
2017-09-04 16:46:36 +08:00
|
|
|
bfun->insn.push_back (insn);
|
2013-03-22 21:32:47 +08:00
|
|
|
|
|
|
|
if (record_debug > 1)
|
|
|
|
ftrace_debug (bfun, "update insn");
|
|
|
|
}
|
|
|
|
|
2014-01-29 19:56:09 +08:00
|
|
|
/* Classify the instruction at PC. */
|
|
|
|
|
|
|
|
static enum btrace_insn_class
|
|
|
|
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
|
|
|
|
{
|
|
|
|
enum btrace_insn_class iclass;
|
|
|
|
|
|
|
|
iclass = BTRACE_INSN_OTHER;
|
2019-04-04 06:02:42 +08:00
|
|
|
try
|
2014-01-29 19:56:09 +08:00
|
|
|
{
|
|
|
|
if (gdbarch_insn_is_call (gdbarch, pc))
|
|
|
|
iclass = BTRACE_INSN_CALL;
|
|
|
|
else if (gdbarch_insn_is_ret (gdbarch, pc))
|
|
|
|
iclass = BTRACE_INSN_RETURN;
|
|
|
|
else if (gdbarch_insn_is_jump (gdbarch, pc))
|
|
|
|
iclass = BTRACE_INSN_JUMP;
|
|
|
|
}
|
2019-04-04 05:59:07 +08:00
|
|
|
catch (const gdb_exception_error &error)
|
Split TRY_CATCH into TRY + CATCH
This patch splits the TRY_CATCH macro into three, so that we go from
this:
~~~
volatile gdb_exception ex;
TRY_CATCH (ex, RETURN_MASK_ERROR)
{
}
if (ex.reason < 0)
{
}
~~~
to this:
~~~
TRY
{
}
CATCH (ex, RETURN_MASK_ERROR)
{
}
END_CATCH
~~~
Thus, we'll be getting rid of the local volatile exception object, and
declaring the caught exception in the catch block.
This allows reimplementing TRY/CATCH in terms of C++ exceptions when
building in C++ mode, while still allowing to build GDB in C mode
(using setjmp/longjmp), as a transition step.
TBC, after this patch, it is _not_ valid to have code between the TRY
and the CATCH blocks, like:
TRY
{
}
// some code here.
CATCH (ex, RETURN_MASK_ERROR)
{
}
END_CATCH
Just like it isn't valid to do that with C++'s native try/catch.
By switching to creating the exception object inside the CATCH block
scope, we can get rid of all the explicitly allocated volatile
exception objects all over the tree, and map the CATCH block more
directly to C++'s catch blocks.
The majority of the TRY_CATCH -> TRY+CATCH+END_CATCH conversion was
done with a script, rerun from scratch at every rebase, no manual
editing involved. After the mechanical conversion, a few places
needed manual intervention, to fix preexisting cases where we were
using the exception object outside of the TRY_CATCH block, and cases
where we were using "else" after a 'if (ex.reason < 0)' [a CATCH
after this patch]. The result was folded into this patch so that GDB
still builds at each incremental step.
END_CATCH is necessary for two reasons:
First, because we name the exception object in the CATCH block, which
requires creating a scope, which in turn must be closed somewhere.
Declaring the exception variable in the initializer field of a for
block, like:
#define CATCH(EXCEPTION, MASK) \
for (struct gdb_exception EXCEPTION; \
exceptions_state_mc_catch (&EXCEPTION, MASK); \
EXCEPTION = exception_none)
would avoid needing END_CATCH, but alas, in C mode, we build with C90,
which doesn't allow mixed declarations and code.
Second, because when TRY/CATCH are wired to real C++ try/catch, as
long as we need to handle cleanup chains, even if there's no CATCH
block that wants to catch the exception, we need to stop at every
frame in the unwind chain and run cleanups, then rethrow. That will
be done in END_CATCH.
After we require C++, we'll still need TRY/CATCH/END_CATCH until
cleanups are completely phased out -- TRY/CATCH in C++ mode will
save/restore the current cleanup chain, like in C mode, and END_CATCH
catches otherwise uncaught exceptions, runs cleanups and rethrows, so
that C++ cleanups and exceptions can coexist.
IMO, this still makes the TRY/CATCH code look a bit more like a
newcomer would expect, so IMO worth it even if we weren't considering
C++.
gdb/ChangeLog.
2015-03-07 Pedro Alves <palves@redhat.com>
* common/common-exceptions.c (struct catcher) <exception>: No
longer a pointer to volatile exception. Now an exception value.
<mask>: Delete field.
(exceptions_state_mc_init): Remove all parameters. Adjust.
(exceptions_state_mc): No longer pop the catcher here.
(exceptions_state_mc_catch): New function.
(throw_exception): Adjust.
* common/common-exceptions.h (exceptions_state_mc_init): Remove
all parameters.
(exceptions_state_mc_catch): Declare.
(TRY_CATCH): Rename to ...
(TRY): ... this. Remove EXCEPTION and MASK parameters.
(CATCH, END_CATCH): New.
All callers adjusted.
gdb/gdbserver/ChangeLog:
2015-03-07 Pedro Alves <palves@redhat.com>
Adjust all callers of TRY_CATCH to use TRY/CATCH/END_CATCH
instead.
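A simplified sketch of how TRY/CATCH/END_CATCH can be expressed on top of native C++ try/catch, as the message above describes for C++ mode. The _SKETCH names, the stand-in exception type, and the empty END_CATCH are illustrative assumptions; GDB's real macros additionally saved and restored the cleanup chain and ran cleanups before rethrowing.

struct gdb_exception_sketch { };	/* Stand-in for GDB's gdb_exception.  */

#define TRY_SKETCH		try {
#define CATCH_SKETCH(EXC, MASK)	} catch (const gdb_exception_sketch &EXC)
#define END_CATCH_SKETCH

static void
sketch_use ()
{
  TRY_SKETCH
    {
      /* Code that may throw.  */
    }
  CATCH_SKETCH (ex, RETURN_MASK_ERROR)
    {
      /* Handle the exception via EX; the MASK argument is accepted only to
	 keep the call sites identical to the old TRY_CATCH form.  */
    }
  END_CATCH_SKETCH
}

This covers only the C++ side; the C-mode implementation described above remained setjmp/longjmp based during the transition.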
2015-03-07 23:14:14 +08:00
|
|
|
{
|
|
|
|
}
|
2014-01-29 19:56:09 +08:00
|
|
|
|
|
|
|
return iclass;
|
|
|
|
}
|
|
|
|
|
btrace: bridge gaps
Most of the time, the trace should be in one piece. This case is handled fine
by GDB. In some cases, however, there may be gaps in the trace. They result
from trace decode errors or from overflows.
A gap in the trace means we lost an unknown amount of trace. Gaps can be very
small, such as a few instructions in the same function, or they can be rather
big. We may, for example, lose a few function calls or returns. The trace may
continue in a different function and we likely don't know how we got there.
Even though we can't say how the program executed across a gap, higher levels
may not be impacted too much by it. Let's assume we have functions a-e and a
trace that looks roughly like this:
a
 \
  b           b
   \         /
    c <gap> c
           /
      d   d
       \ /
        e
Even though we can't say for sure, it is likely that b and c are the same
function instance before and after the gap. This patch is trying to connect
the c and b function segments across the gap.
This will add a to the back trace of b on the right hand side. The changes are
reflected in GDB's internal representation of the trace and will improve:
- the output of "record function-call-history /c"
- the output of "backtrace" in replay mode
- source stepping in replay mode
will be improved indirectly via the improved back trace
I don't have an automated test for this patch; decode errors will be fixed and
overflows occur sporadically and are quite rare. I tested it by hacking GDB to
provoke a decode error and on the expected gap in the gdb.btrace/dlopen.exp
test.
The issue is that we can't predict where we will be able to re-sync in case of
errors. For the expected decode error in gdb.btrace/dlopen.exp, for example, we
may be able to re-sync somewhere in dlclose, in test, in main, or not at all.
Here's one example run of gdb.btrace/dlopen.exp with and without this patch.
(gdb) info record
Active record target: record-btrace
Recording format: Intel Processor Trace.
Buffer size: 16kB.
warning: Non-contiguous trace at instruction 66608 (offset = 0xa83, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66652 (offset = 0xa9b, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66770 (offset = 0xacb, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66966 (offset = 0xb60, pc = 0xb7ff5ee4).
warning: Non-contiguous trace at instruction 66994 (offset = 0xb74, pc = 0xb7ff5f24).
warning: Non-contiguous trace at instruction 67334 (offset = 0xbac, pc = 0xb7ff5e6d).
warning: Non-contiguous trace at instruction 69022 (offset = 0xc04, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69116 (offset = 0xc1c, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69504 (offset = 0xc74, pc = 0xb7ff605d).
warning: Non-contiguous trace at instruction 83648 (offset = 0xecc, pc = 0xb7ff6134).
warning: Decode error (-13) at instruction 83876 (offset = 0xf48, pc = 0xb7fd6380): no memory mapped at this address.
warning: Non-contiguous trace at instruction 83876 (offset = 0x11b7, pc = 0xb7ff1c70).
Recorded 83948 instructions in 912 functions (12 gaps) for thread 1 (process 12996).
(gdb) record instruction-history 83876, +2
83876 => 0xb7fec46f <call_init.part.0+95>: call *%eax
[decode error (-13): no memory mapped at this address]
[disabled]
83877 0xb7ff1c70 <_dl_close_worker.part.0+1584>: nop
Without the patch, the trace is disconnected and the backtrace is short:
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
Backtrace stopped: not enough registers or memory available to unwind further
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
With the patch, GDB is able to connect the trace pieces and we get a full
backtrace.
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7ff02e2 in _dl_open () from /lib/ld-linux.so.2
#5 0xb7fc3c65 in dlopen_doit () from /lib/libdl.so.2
#6 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#7 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#8 0xb7fc3d0e in dlopen@@GLIBC_2.1 () from /lib/libdl.so.2
#9 0xb7ff28ee in _dl_runtime_resolve () from /lib/ld-linux.so.2
#10 0x0804841c in ?? ()
#11 0x08048470 in dlopen@plt ()
#12 0x080485a3 in test ()
#13 0x08048628 in main ()
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
It worked nicely in this case but it may, of course, also lead to weird
connections; it is a heuristic, after all.
It works best when the gap is small and the trace pieces are long.
gdb/
* btrace.c (bfun_s): New typedef.
(ftrace_update_caller): Print caller in debug dump.
(ftrace_get_caller, ftrace_match_backtrace, ftrace_fixup_level)
(ftrace_compute_global_level_offset, ftrace_connect_bfun)
(ftrace_connect_backtrace, ftrace_bridge_gap, btrace_bridge_gaps): New.
(btrace_compute_ftrace_bts): Pass vector of gaps. Collect gaps.
(btrace_compute_ftrace_pt): Likewise.
(btrace_compute_ftrace): Split into this, ...
(btrace_compute_ftrace_1): ... this, and ...
(btrace_finalize_ftrace): ... this. Call btrace_bridge_gaps.
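To connect the pieces: a hedged sketch of how the match count from ftrace_match_backtrace (defined just below) might drive the gap bridging. The names ftrace_bridge_gap and ftrace_connect_backtrace come from the ChangeLog above, but this body and the exact signatures are illustrative assumptions, not GDB's implementation.

/* Illustrative sketch only, see the note above.  Pick the pair of caller
   segments on either side of a gap whose back traces agree on the most
   entries, and connect them if the match is good enough.  */

static int
ftrace_bridge_gap_sketch (struct btrace_thread_info *btinfo,
			  struct btrace_function *lhs,
			  struct btrace_function *rhs,
			  int min_matches)
{
  struct btrace_function *best_l = NULL, *best_r = NULL;
  int best_matches = 0;

  /* Try all pairings of callers on both sides of the gap.  */
  for (struct btrace_function *cand_l = lhs; cand_l != NULL;
       cand_l = ftrace_get_caller (btinfo, cand_l))
    for (struct btrace_function *cand_r = rhs; cand_r != NULL;
	 cand_r = ftrace_get_caller (btinfo, cand_r))
      {
	int matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);

	if (matches > best_matches)
	  {
	    best_matches = matches;
	    best_l = cand_l;
	    best_r = cand_r;
	  }
      }

  /* Refuse to bridge on weak evidence; the heuristic caveat above applies.  */
  if (best_matches < min_matches)
    return 0;

  /* Connect the best matching segments (and their callers) across the gap.  */
  ftrace_connect_backtrace (btinfo, best_l, best_r);
  return best_matches;
}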
2016-01-21 22:02:27 +08:00
|
|
|
/* Try to match the back trace at LHS to the back trace at RHS. Returns the
|
|
|
|
number of matching function segments or zero if the back traces do not
|
2017-05-30 18:47:37 +08:00
|
|
|
match. BTINFO is the branch trace information for the current thread. */
|
2016-01-21 22:02:27 +08:00
|
|
|
|
|
|
|
static int
|
2017-05-30 18:47:37 +08:00
|
|
|
ftrace_match_backtrace (struct btrace_thread_info *btinfo,
|
|
|
|
struct btrace_function *lhs,
|
2016-01-21 22:02:27 +08:00
			struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
	return 0;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);
    }
    }

  return matches;
}

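The match count computed above drives the bridging heuristic described in the
commit message: for each gap, candidate segments on either side are compared by
how far their backtraces agree, and the pairing with the longest agreement gets
connected.  The following is a small, self-contained sketch of that selection
step; the types and helpers (segment, backtrace_matches, best_connection) are
illustrative stand-ins, not GDB's actual ftrace_bridge_gap code.

#include <string>
#include <utility>
#include <vector>

/* Illustrative stand-in for a function segment: a name plus the index of its
   caller segment in a flat vector (-1 means the caller is unknown).  */
struct segment
{
  std::string name;
  int caller;
};

/* Count how many caller levels of LHS and RHS name the same function,
   returning 0 on the first mismatch, as ftrace_match_backtrace does.  */
static int
backtrace_matches (const std::vector<segment> &segs, int lhs, int rhs)
{
  int matches = 0;

  while (lhs >= 0 && rhs >= 0)
    {
      if (segs[lhs].name != segs[rhs].name)
	return 0;

      lhs = segs[lhs].caller;
      rhs = segs[rhs].caller;
      ++matches;
    }

  return matches;
}

/* Pick the (prev, next) candidate pair with the best backtrace agreement
   across a gap; {-1, -1} means no pair matched at all.  */
static std::pair<int, int>
best_connection (const std::vector<segment> &segs,
		 const std::vector<int> &prev_candidates,
		 const std::vector<int> &next_candidates)
{
  std::pair<int, int> best = { -1, -1 };
  int best_matches = 0;

  for (int prev : prev_candidates)
    for (int next : next_candidates)
      {
	int matches = backtrace_matches (segs, prev, next);

	if (matches > best_matches)
	  {
	    best_matches = matches;
	    best = { prev, next };
	  }
      }

  return best;
}

The real code works gap by gap and is more conservative, but the scoring idea
is the same: a connection is only as trustworthy as the length of the matching
backtrace behind it.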
/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
   BTINFO is the branch trace information for the current thread.  */
static void
ftrace_fixup_level (struct btrace_thread_info *btinfo,
		    struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  while (bfun != NULL)
    {
      bfun->level += adjustment;
      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
    }
}

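A hypothetical usage sketch (not a quote of GDB's call site): when two trace
pieces are judged to belong to the same function instance across a gap, the
piece after the gap is shifted so that its levels line up with the piece
before it.

/* PREV and NEXT are assumed to be two halves of the same function instance,
   so NEXT and every following segment are shifted onto PREV's level.  */
ftrace_fixup_level (btinfo, next, prev->level - next->level);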
/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  int level = INT_MAX;
  if (btinfo == NULL)
    return;

  if (btinfo->functions.empty ())
    return;

  unsigned int length = btinfo->functions.size() - 1;
  for (unsigned int i = 0; i < length; ++i)
    level = std::min (level, btinfo->functions[i].level);
  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     ignore the segment.  */
  struct btrace_function *last = &btinfo->functions.back();
btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
2017-09-04 16:46:36 +08:00
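In miniature, and with the member list abbreviated, the change this annotation
describes looks roughly like this; only the insn field is the point.

/* Before: instructions were held in a GDB VEC of POD entries.  After: a
   std::vector, because btrace_insn's flags member is not a POD type.  */
struct btrace_function
{
  /* ... other members elided ...  */
  std::vector<btrace_insn> insn;	/* The instructions of this function segment.  */
};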
  if (last->insn.size () != 1)
    level = std::min (level, last->level);
  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}
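For illustration, with assumed values rather than ones from a real run: if
decoding started in the middle of a call stack and the segment levels ended up
ranging from -2 to 3, the minimal level is -2 and btinfo->level becomes 2.  A
consumer can then derive a non-negative indentation for each segment:

/* Hypothetical consumer: normalize a segment's level for display.  */
int indentation = bfun->level + btinfo->level;	/* e.g. -2 + 2 == 0 for the outermost segment.  */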

/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  BTINFO is the branch trace information for the
   current thread.  */
static void
ftrace_connect_bfun (struct btrace_thread_info *btinfo,
                     struct btrace_function *prev,
                     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->next == 0);
  gdb_assert (next->prev == 0);
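
  /* Link the two segments as if the trace had been contiguous.  */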
  prev->next = next->number;
  next->prev = prev->number;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (btinfo, next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == 0)
    {
      const btrace_function_flags flags = next->flags;

      next = ftrace_find_call_by_number (btinfo, next->up);
      if (next != NULL)
        {
          DEBUG_FTRACE ("using next's callers");
          ftrace_fixup_caller (btinfo, prev, next, flags);
        }
    }
  else if (next->up == 0)
btrace: bridge gaps
Most of the time, the trace should be in one piece. This case is handled fine
by GDB. In some cases, however, there may be gaps in the trace. They result
from trace decode errors or from overflows.
A gap in the trace means we lost an unknown amount of trace. Gaps can be very
small, such as a few instructions in the same function, or they can be rather
big. We may, for example, lose a few function calls or returns. The trace may
continue in a different function and we likely don't know how we got there.
Even though we can't say how the program executed across a gap, higher levels
may not be impacted too much by it. Let's assume we have functions a-e and a
trace that looks roughly like this:
a
\
b b
\ /
c <gap> c
/
d d
\ /
e
Even though we can't say for sure, it is likely that b and c are the same
function instance before and after the gap. This patch is trying to connect
the c and b function segments across the gap.
This will add a to the back trace of b on the right hand side. The changes are
reflected in GDB's internal representation of the trace and will improve:
- the output of "record function-call-history /c"
- the output of "backtrace" in replay mode
- source stepping in replay mode
will be improved indirectly via the improved back trace
I don't have an automated test for this patch; decode errors will be fixed and
overflows occur sporadically and are quite rare. I tested it by hacking GDB to
provoke a decode error and on the expected gap in the gdb.btrace/dlopen.exp
test.
The issue is that we can't predict where we will be able to re-sync in case of
errors. For the expected decode error in gdb.btrace/dlopen.exp, for example, we
may be able to re-sync somewhere in dlclose, in test, in main, or not at all.
Here's one example run of gdb.btrace/dlopen.exp with and without this patch.
(gdb) info record
Active record target: record-btrace
Recording format: Intel Processor Trace.
Buffer size: 16kB.
warning: Non-contiguous trace at instruction 66608 (offset = 0xa83, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66652 (offset = 0xa9b, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66770 (offset = 0xacb, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66966 (offset = 0xb60, pc = 0xb7ff5ee4).
warning: Non-contiguous trace at instruction 66994 (offset = 0xb74, pc = 0xb7ff5f24).
warning: Non-contiguous trace at instruction 67334 (offset = 0xbac, pc = 0xb7ff5e6d).
warning: Non-contiguous trace at instruction 69022 (offset = 0xc04, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69116 (offset = 0xc1c, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69504 (offset = 0xc74, pc = 0xb7ff605d).
warning: Non-contiguous trace at instruction 83648 (offset = 0xecc, pc = 0xb7ff6134).
warning: Decode error (-13) at instruction 83876 (offset = 0xf48, pc = 0xb7fd6380): no memory mapped at this address.
warning: Non-contiguous trace at instruction 83876 (offset = 0x11b7, pc = 0xb7ff1c70).
Recorded 83948 instructions in 912 functions (12 gaps) for thread 1 (process 12996).
(gdb) record instruction-history 83876, +2
83876 => 0xb7fec46f <call_init.part.0+95>: call *%eax
[decode error (-13): no memory mapped at this address]
[disabled]
83877 0xb7ff1c70 <_dl_close_worker.part.0+1584>: nop
Without the patch, the trace is disconnected and the backtrace is short:
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
Backtrace stopped: not enough registers or memory available to unwind further
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
With the patch, GDB is able to connect the trace pieces and we get a full
backtrace.
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7ff02e2 in _dl_open () from /lib/ld-linux.so.2
#5 0xb7fc3c65 in dlopen_doit () from /lib/libdl.so.2
#6 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#7 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#8 0xb7fc3d0e in dlopen@@GLIBC_2.1 () from /lib/libdl.so.2
#9 0xb7ff28ee in _dl_runtime_resolve () from /lib/ld-linux.so.2
#10 0x0804841c in ?? ()
#11 0x08048470 in dlopen@plt ()
#12 0x080485a3 in test ()
#13 0x08048628 in main ()
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
It worked nicely in this case but it may, of course, also lead to weird
connections; it is a heuristic, after all.
It works best when the gap is small and the trace pieces are long.
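To make the matching idea concrete, here is a minimal, self-contained sketch in plain C (this is not the btrace.c implementation; the segment type and every name in it are invented for illustration). It counts how many caller levels on the two sides of a gap name the same function; the longer the match, the more plausible the connection. Presumably this is roughly the job of the new ftrace_match_backtrace and ftrace_bridge_gap helpers listed in the ChangeLog below.

#include <stddef.h>
#include <string.h>

/* Hypothetical stand-in for a function segment in the recorded trace.  */
struct segment
{
  const char *name;      /* Name of the function this segment belongs to.  */
  struct segment *up;    /* Caller segment, or NULL at the top.  */
};

/* Count the number of caller levels, starting at LHS and RHS, that refer
   to the same function.  A gap would then be bridged between the pair of
   segments with the longest match.  */

static int
match_backtrace (const struct segment *lhs, const struct segment *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (strcmp (lhs->name, rhs->name) != 0)
        break;

      lhs = lhs->up;
      rhs = rhs->up;
    }

  return matches;
}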
gdb/
* btrace.c (bfun_s): New typedef.
(ftrace_update_caller): Print caller in debug dump.
(ftrace_get_caller, ftrace_match_backtrace, ftrace_fixup_level)
(ftrace_compute_global_level_offset, ftrace_connect_bfun)
(ftrace_connect_backtrace, ftrace_bridge_gap, btrace_bridge_gaps): New.
(btrace_compute_ftrace_bts): Pass vector of gaps. Collect gaps.
(btrace_compute_ftrace_pt): Likewise.
(btrace_compute_ftrace): Split into this, ...
(btrace_compute_ftrace_1): ... this, and ...
(btrace_finalize_ftrace): ... this. Call btrace_bridge_gaps.
    {
      const btrace_function_flags flags = prev->flags;

      prev = ftrace_find_call_by_number (btinfo, prev->up);
      if (prev != NULL)
        {
          DEBUG_FTRACE ("using prev's callers");
          ftrace_fixup_caller (btinfo, next, prev, flags);
        }
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
         link to add the tail callers to NEXT's back trace.

         This removes NEXT->UP from NEXT's back trace.  It will be added back
         when connecting NEXT and PREV's callers - provided they exist.

         If PREV's back trace consists of a series of tail calls without an
         actual call, there will be no further connection and NEXT's caller will
         be removed for good.  To catch this case, we handle it here and connect
         the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        {
          struct btrace_function *caller;
          btrace_function_flags next_flags, prev_flags;
          /* We checked NEXT->UP above so CALLER can't be NULL.  */
          caller = ftrace_find_call_by_number (btinfo, next->up);
          next_flags = next->flags;
          prev_flags = prev->flags;
          DEBUG_FTRACE ("adding prev's tail calls to next");

          prev = ftrace_find_call_by_number (btinfo, prev->up);
          ftrace_fixup_caller (btinfo, next, prev, prev_flags);
          for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
                                                                  prev->up))
            {
              /* At the end of PREV's back trace, continue with CALLER.  */
              if (prev->up == 0)
                {
                  DEBUG_FTRACE ("fixing up link for tailcall chain");
                  ftrace_debug (prev, "..top");
                  ftrace_debug (caller, "..up");

                  ftrace_fixup_caller (btinfo, prev, caller, next_flags);
                  /* If we skipped any tail calls, this may move CALLER to a
                     different function level.

                     Note that changing CALLER's level is only OK because we
                     know that this is the last iteration of the bottom-to-top
                     walk in ftrace_connect_backtrace.

                     Otherwise we will fix up CALLER's level when we connect it
                     to PREV's caller in the next iteration.  */
                  ftrace_fixup_level (btinfo, caller,
                                      prev->level - caller->level - 1);
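As a worked example of the adjustment (assuming ftrace_fixup_level shifts CALLER and its callers by the given amount, and that a caller's level is one lower than its callee's): with prev->level == 2 and caller->level == 4, the adjustment is 2 - 4 - 1 = -3, which leaves CALLER at level 1, directly above PREV.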
                  break;
                }

              /* There's nothing to do if we find a real call.  */
              if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
                {
                  DEBUG_FTRACE ("will fix up link in next iteration");
                  break;
                }
            }
        }
    }
}

/* Connect function segments on the same level in the back trace at LHS and
   RHS.  The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
                          struct btrace_function *lhs,
                          struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);

      ftrace_connect_bfun (btinfo, prev, next);
    }
}
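
A small, self-contained illustration may help before the gap-bridging helper that follows.  This is not GDB code; it merely mimics, on plain strings, the matching idea that ftrace_match_backtrace applies to function segments: walk two back traces from the innermost frame outwards and count how far they agree.  The names and the main () driver below are purely hypothetical.

/* Illustration only -- not part of btrace.c.  Count how many innermost
   frames of two recorded back traces agree; this is the quantity a
   bridging pass compares against its MIN_MATCHES requirement.  */

#include <stdio.h>
#include <string.h>

static int
match_backtrace (const char **lhs, int lhs_depth,
                 const char **rhs, int rhs_depth)
{
  int matches = 0;

  /* Index 0 is the innermost frame; stop at the first mismatch.  */
  while (matches < lhs_depth && matches < rhs_depth
         && strcmp (lhs[matches], rhs[matches]) == 0)
    matches += 1;

  return matches;
}

int
main (void)
{
  /* Back trace left of the gap: c, called by b, called by a.  */
  const char *lhs[] = { "c", "b", "a" };
  /* Back trace right of the gap: c, called by b; b's caller is unknown.  */
  const char *rhs[] = { "c", "b" };

  /* Two frames match, so a bridging pass that requires at most two
     matches would connect the two "c" segments across the gap.  */
  printf ("matching frames: %d\n", match_backtrace (lhs, 3, rhs, 2));
  return 0;
}
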
/* Bridge the gap between two function segments left and right of a gap if
   their respective back traces match in at least MIN_MATCHES functions.
   BTINFO is the branch trace information for the current thread.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_thread_info *btinfo,
                   struct btrace_function *lhs, struct btrace_function *rhs,
                   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
                rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL;
       cand_l = ftrace_get_caller (btinfo, cand_l))
    for (cand_r = rhs; cand_r != NULL;
         cand_r = ftrace_get_caller (btinfo, cand_r))
btrace: bridge gaps
Most of the time, the trace should be in one piece. This case is handled fine
by GDB. In some cases, however, there may be gaps in the trace. They result
from trace decode errors or from overflows.
A gap in the trace means we lost an unknown amount of trace. Gaps can be very
small, such as a few instructions in the same function, or they can be rather
big. We may, for example, lose a few function calls or returns. The trace may
continue in a different function and we likely don't know how we got there.
Even though we can't say how the program executed across a gap, higher levels
may not be impacted too much by it. Let's assume we have functions a-e and a
trace that looks roughly like this:
a
\
b b
\ /
c <gap> c
/
d d
\ /
e
Even though we can't say for sure, it is likely that b and c are the same
function instance before and after the gap. This patch is trying to connect
the c and b function segments across the gap.
This will add a to the back trace of b on the right hand side. The changes are
reflected in GDB's internal representation of the trace and will improve:
- the output of "record function-call-history /c"
- the output of "backtrace" in replay mode
- source stepping in replay mode
will be improved indirectly via the improved back trace
I don't have an automated test for this patch; decode errors will be fixed and
overflows occur sporadically and are quite rare. I tested it by hacking GDB to
provoke a decode error and on the expected gap in the gdb.btrace/dlopen.exp
test.
The issue is that we can't predict where we will be able to re-sync in case of
errors. For the expected decode error in gdb.btrace/dlopen.exp, for example, we
may be able to re-sync somewhere in dlclose, in test, in main, or not at all.
Here's one example run of gdb.btrace/dlopen.exp with and without this patch.
(gdb) info record
Active record target: record-btrace
Recording format: Intel Processor Trace.
Buffer size: 16kB.
warning: Non-contiguous trace at instruction 66608 (offset = 0xa83, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66652 (offset = 0xa9b, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66770 (offset = 0xacb, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66966 (offset = 0xb60, pc = 0xb7ff5ee4).
warning: Non-contiguous trace at instruction 66994 (offset = 0xb74, pc = 0xb7ff5f24).
warning: Non-contiguous trace at instruction 67334 (offset = 0xbac, pc = 0xb7ff5e6d).
warning: Non-contiguous trace at instruction 69022 (offset = 0xc04, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69116 (offset = 0xc1c, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69504 (offset = 0xc74, pc = 0xb7ff605d).
warning: Non-contiguous trace at instruction 83648 (offset = 0xecc, pc = 0xb7ff6134).
warning: Decode error (-13) at instruction 83876 (offset = 0xf48, pc = 0xb7fd6380): no memory mapped at this address.
warning: Non-contiguous trace at instruction 83876 (offset = 0x11b7, pc = 0xb7ff1c70).
Recorded 83948 instructions in 912 functions (12 gaps) for thread 1 (process 12996).
(gdb) record instruction-history 83876, +2
83876 => 0xb7fec46f <call_init.part.0+95>: call *%eax
[decode error (-13): no memory mapped at this address]
[disabled]
83877 0xb7ff1c70 <_dl_close_worker.part.0+1584>: nop
Without the patch, the trace is disconnected and the backtrace is short:
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
Backtrace stopped: not enough registers or memory available to unwind further
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
With the patch, GDB is able to connect the trace pieces and we get a full
backtrace.
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7ff02e2 in _dl_open () from /lib/ld-linux.so.2
#5 0xb7fc3c65 in dlopen_doit () from /lib/libdl.so.2
#6 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#7 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#8 0xb7fc3d0e in dlopen@@GLIBC_2.1 () from /lib/libdl.so.2
#9 0xb7ff28ee in _dl_runtime_resolve () from /lib/ld-linux.so.2
#10 0x0804841c in ?? ()
#11 0x08048470 in dlopen@plt ()
#12 0x080485a3 in test ()
#13 0x08048628 in main ()
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
It worked nicely in this case but it may, of course, also lead to weird
connections; it is a heuristic, after all.
It works best when the gap is small and the trace pieces are long.
gdb/
* btrace.c (bfun_s): New typedef.
(ftrace_update_caller): Print caller in debug dump.
(ftrace_get_caller, ftrace_match_backtrace, ftrace_fixup_level)
(ftrace_compute_global_level_offset, ftrace_connect_bfun)
(ftrace_connect_backtrace, ftrace_bridge_gap, btrace_bridge_gaps): New.
(btrace_compute_ftrace_bts): Pass vector of gaps. Collect gaps.
(btrace_compute_ftrace_pt): Likewise.
(btrace_compute_ftrace): Split into this, ...
(btrace_compute_ftrace_1): ... this, and ...
(btrace_finalize_ftrace): ... this. Call btrace_bridge_gaps.
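The matching heuristic described above can be pictured with a small, self-contained C++ sketch. This is not GDB's implementation; the segment type and count_matching_callers are invented purely for illustration. The idea is to walk the caller chains on both sides of a gap and count how many corresponding segments name the same function; the higher the count, the more likely the two trace pieces belong to the same execution context.

#include <cstdio>
#include <string>

/* Toy stand-in for a function segment: the function's name plus a pointer
   to the caller segment in the reconstructed back trace.  */
struct segment
{
  std::string function;
  const segment *caller;	/* nullptr at the bottom of the back trace.  */
};

/* Count how many corresponding caller segments on both sides of a gap
   name the same function.  More matches mean higher confidence that the
   two trace pieces belong to the same execution context.  */
static int
count_matching_callers (const segment *lhs, const segment *rhs)
{
  int matches = 0;

  for (; lhs != nullptr && rhs != nullptr;
       lhs = lhs->caller, rhs = rhs->caller)
    {
      if (lhs->function != rhs->function)
	break;

      ++matches;
    }

  return matches;
}

int
main ()
{
  /* Left piece of the trace: c called from b called from a.  */
  segment a_l { "a", nullptr }, b_l { "b", &a_l }, c_l { "c", &b_l };

  /* Right piece after the gap: again c called from b called from a.  */
  segment a_r { "a", nullptr }, b_r { "b", &a_r }, c_r { "c", &b_r };

  /* Prints "matches: 3" for this hypothetical pair of back traces.  */
  std::printf ("matches: %d\n", count_matching_callers (&c_l, &c_r));
  return 0;
}

In the sketch, the two pieces share three matching segments, so the gap would be bridged as soon as the required number of matches drops to three or below; btrace_bridge_gaps below starts at five required matches and lowers the requirement on each iteration.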
      {
        int matches;

        matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
        if (best_matches < matches)
          {
            best_matches = matches;
            best_l = cand_l;
            best_r = cand_r;
          }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
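
  /* Illustration with hypothetical values (not part of the original source):
     if BEST_L->level is -2 and BEST_R->level is 0, the adjustment is -2, so
     RHS and all following segments are shifted down by two levels and BEST_R
     ends up on BEST_L's level before the two pieces are connected below.  */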

  ftrace_connect_backtrace (btinfo, best_l, best_r);

  return best_matches;
}

/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo = &tp->btrace;
  std::vector<unsigned int> remaining;
  int min_matches;

  DEBUG ("bridge gaps");

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
         skip a gap and revisit it again after we closed later gaps.  */
      while (!gaps.empty ())
        {
          for (const unsigned int number : gaps)
            {
              struct btrace_function *gap, *lhs, *rhs;
              int bridged;

              gap = ftrace_find_call_by_number (btinfo, number);
btrace: bridge gaps
Most of the time, the trace should be in one piece. This case is handled fine
by GDB. In some cases, however, there may be gaps in the trace. They result
from trace decode errors or from overflows.
A gap in the trace means we lost an unknown amount of trace. Gaps can be very
small, such as a few instructions in the same function, or they can be rather
big. We may, for example, lose a few function calls or returns. The trace may
continue in a different function and we likely don't know how we got there.
Even though we can't say how the program executed across a gap, higher levels
may not be impacted too much by it. Let's assume we have functions a-e and a
trace that looks roughly like this:
a
\
b b
\ /
c <gap> c
/
d d
\ /
e
Even though we can't say for sure, it is likely that b and c are the same
function instance before and after the gap. This patch is trying to connect
the c and b function segments across the gap.
This will add a to the back trace of b on the right hand side. The changes are
reflected in GDB's internal representation of the trace and will improve:
- the output of "record function-call-history /c"
- the output of "backtrace" in replay mode
- source stepping in replay mode
will be improved indirectly via the improved back trace
I don't have an automated test for this patch; decode errors will be fixed and
overflows occur sporadically and are quite rare. I tested it by hacking GDB to
provoke a decode error and on the expected gap in the gdb.btrace/dlopen.exp
test.
The issue is that we can't predict where we will be able to re-sync in case of
errors. For the expected decode error in gdb.btrace/dlopen.exp, for example, we
may be able to re-sync somewhere in dlclose, in test, in main, or not at all.
Here's one example run of gdb.btrace/dlopen.exp with and without this patch.
(gdb) info record
Active record target: record-btrace
Recording format: Intel Processor Trace.
Buffer size: 16kB.
warning: Non-contiguous trace at instruction 66608 (offset = 0xa83, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66652 (offset = 0xa9b, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66770 (offset = 0xacb, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66966 (offset = 0xb60, pc = 0xb7ff5ee4).
warning: Non-contiguous trace at instruction 66994 (offset = 0xb74, pc = 0xb7ff5f24).
warning: Non-contiguous trace at instruction 67334 (offset = 0xbac, pc = 0xb7ff5e6d).
warning: Non-contiguous trace at instruction 69022 (offset = 0xc04, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69116 (offset = 0xc1c, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69504 (offset = 0xc74, pc = 0xb7ff605d).
warning: Non-contiguous trace at instruction 83648 (offset = 0xecc, pc = 0xb7ff6134).
warning: Decode error (-13) at instruction 83876 (offset = 0xf48, pc = 0xb7fd6380): no memory mapped at this address.
warning: Non-contiguous trace at instruction 83876 (offset = 0x11b7, pc = 0xb7ff1c70).
Recorded 83948 instructions in 912 functions (12 gaps) for thread 1 (process 12996).
(gdb) record instruction-history 83876, +2
83876 => 0xb7fec46f <call_init.part.0+95>: call *%eax
[decode error (-13): no memory mapped at this address]
[disabled]
83877 0xb7ff1c70 <_dl_close_worker.part.0+1584>: nop
Without the patch, the trace is disconnected and the backtrace is short:
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
Backtrace stopped: not enough registers or memory available to unwind further
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
With the patch, GDB is able to connect the trace pieces and we get a full
backtrace.
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7ff02e2 in _dl_open () from /lib/ld-linux.so.2
#5 0xb7fc3c65 in dlopen_doit () from /lib/libdl.so.2
#6 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#7 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#8 0xb7fc3d0e in dlopen@@GLIBC_2.1 () from /lib/libdl.so.2
#9 0xb7ff28ee in _dl_runtime_resolve () from /lib/ld-linux.so.2
#10 0x0804841c in ?? ()
#11 0x08048470 in dlopen@plt ()
#12 0x080485a3 in test ()
#13 0x08048628 in main ()
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
It worked nicely in this case but it may, of course, also lead to weird
connections; it is a heuristic, after all.
It works best when the gap is small and the trace pieces are long.
gdb/
* btrace.c (bfun_s): New typedef.
(ftrace_update_caller): Print caller in debug dump.
(ftrace_get_caller, ftrace_match_backtrace, ftrace_fixup_level)
(ftrace_compute_global_level_offset, ftrace_connect_bfun)
(ftrace_connect_backtrace, ftrace_bridge_gap, btrace_bridge_gaps): New.
(btrace_compute_ftrace_bts): Pass vector of gaps. Collect gaps.
(btrace_compute_ftrace_pt): Likewise.
(btrace_compute_ftrace): Split into this, ...
(btrace_compute_ftrace_1): ... this, and ...
(btrace_finalize_ftrace): ... this. Call btrace_bridge_gaps.
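The annotated lines that follow spread this loop over many blame entries, one statement at a time. As a reading aid, here is a condensed sketch of the gap-bridging retry loop described above. The function name, its signature, and the starting value of min_matches are assumptions made for illustration; the individual statements mirror the annotated fragments below rather than quoting the GDB source verbatim.

/* Condensed sketch of the gap-bridging loop (reading aid only).  The
   signature and the initial value of MIN_MATCHES are assumptions.  */

static void
bridge_gaps_sketch (struct btrace_thread_info *btinfo,
                    std::vector<unsigned int> &gaps)
{
  std::vector<unsigned int> remaining;

  /* Ask for fewer backtrace matches on each pass so that short trace
     pieces still get a chance to be bridged.  */
  for (int min_matches = 5; min_matches > 0; --min_matches)
    {
      while (!gaps.empty ())
        {
          for (const unsigned int number : gaps)
            {
              struct btrace_function *gap, *lhs, *rhs;

              gap = ftrace_find_call_by_number (btinfo, number);

              /* Ignore gaps at the beginning of the trace and all but the
                 leftmost gap in a sequence of gaps.  */
              lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
              if (lhs == NULL || lhs->errcode != 0)
                continue;

              /* Skip consecutive gaps to the right; ignore gaps at the end
                 of the trace.  */
              rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
              while (rhs != NULL && rhs->errcode != 0)
                rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
              if (rhs == NULL)
                continue;

              /* Remember gaps we could not bridge; re-queueing them in GAPS
                 right away could loop forever.  */
              if (ftrace_bridge_gap (btinfo, lhs, rhs, min_matches) == 0)
                remaining.push_back (number);
            }

          /* Stop this pass once a full sweep over GAPS makes no progress.  */
          if (remaining.size () == gaps.size ())
            break;

          gaps.clear ();
          gaps.swap (remaining);
        }

      /* Either all gaps are bridged or the remaining ones resisted this
         match requirement; lower it and try again.  */
      if (gaps.empty ())
        break;

      remaining.clear ();
    }

  /* Recompute the global level offset for the updated trace.  */
  ftrace_compute_global_level_offset (btinfo);
}

The annotated lines below show the same statements in their original context.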
2016-01-21 22:02:27 +08:00
|
|
|
/* We may have a sequence of gaps if we run from one error into
|
|
|
|
the next as we try to re-sync onto the trace stream. Ignore
|
|
|
|
all but the leftmost gap in such a sequence.
|
|
|
|
|
|
|
|
Also ignore gaps at the beginning of the trace. */
|
2017-05-30 18:47:37 +08:00
|
|
|
lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
|
2016-01-21 22:02:27 +08:00
|
|
|
if (lhs == NULL || lhs->errcode != 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Skip gaps to the right. */
|
2017-05-30 18:47:37 +08:00
|
|
|
rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
|
|
|
|
while (rhs != NULL && rhs->errcode != 0)
|
|
|
|
rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
|
2016-01-21 22:02:27 +08:00
|
|
|
|
|
|
|
/* Ignore gaps at the end of the trace. */
|
|
|
|
if (rhs == NULL)
|
|
|
|
continue;
|
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
|
2016-01-21 22:02:27 +08:00
|
|
|
|
|
|
|
/* Keep track of gaps we were not able to bridge and try again.
|
|
|
|
If we just pushed them to the end of GAPS we would risk an
|
|
|
|
infinite loop in case we simply cannot bridge a gap. */
|
|
|
|
if (bridged == 0)
|
2017-05-30 18:47:37 +08:00
|
|
|
remaining.push_back (number);
|
2016-01-21 22:02:27 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Let's see if we made any progress. */
|
2017-05-30 18:47:37 +08:00
|
|
|
if (remaining.size () == gaps.size ())
|
2016-01-21 22:02:27 +08:00
|
|
|
break;
|
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
gaps.clear ();
|
|
|
|
gaps.swap (remaining);
|
2016-01-21 22:02:27 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* We get here if either GAPS is empty or if GAPS equals REMAINING. */
|
2017-05-30 18:47:37 +08:00
|
|
|
if (gaps.empty ())
|
2016-01-21 22:02:27 +08:00
|
|
|
break;
|
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
remaining.clear ();
|
2016-01-21 22:02:27 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* We may omit this in some cases. Not sure it is worth the extra
|
|
|
|
complication, though. */
|
2017-05-30 18:47:37 +08:00
|
|
|
ftrace_compute_global_level_offset (btinfo);
|
}

/* Compute the function branch trace from BTS trace.  */

btrace: change branch trace data structure
The branch trace is represented as 3 vectors:
- a block vector
- an instruction vector
- a function vector
Each vector (except for the first) is computed from the one above.
Change this into a graph where a node represents a sequence of instructions
belonging to the same function and where we have three types of edges to connect
the function segments:
- control flow
- same function (instance)
- call stack
This allows us to navigate in the branch trace. We will need this for "record
goto" and reverse execution.
This patch introduces the data structure and computes the control flow edges.
It also introduces iterator structs to simplify iterating over the branch trace
in control-flow order.
It also fixes PR gdb/15240 since now recursive calls are handled correctly.
Fix the test that got the number of expected fib instances and also the
function numbers wrong.
The current instruction had been part of the branch trace. This will look odd
once we start supporting reverse execution. Remove it. We still keep it in
the trace itself to allow extending the branch trace more easily in the future.
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (struct btrace_func_link): New.
(enum btrace_function_flag): New.
(struct btrace_inst): Rename to ...
(struct btrace_insn): ...this. Update all users.
(struct btrace_func) <ibegin, iend>: Remove.
(struct btrace_func_link): New.
(struct btrace_func): Rename to ...
(struct btrace_function): ...this. Update all users.
(struct btrace_function) <segment, flow, up, insn, insn_offset)
(number, level, flags>: New.
(struct btrace_insn_iterator): Rename to ...
(struct btrace_insn_history): ...this.
Update all users.
(struct btrace_insn_iterator, btrace_call_iterator): New.
(struct btrace_target_info) <btrace, itrace, ftrace>: Remove.
(struct btrace_target_info) <begin, end, level>
<insn_history, call_history>: New.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* btrace.c (btrace_init_insn_iterator)
(btrace_init_func_iterator, compute_itrace): Remove.
(ftrace_print_function_name, ftrace_print_filename)
(ftrace_skip_file): Change
parameter to const.
(ftrace_init_func): Remove.
(ftrace_debug): Use new btrace_function fields.
(ftrace_function_switched): Also consider gaining and
losing symbol information.
(ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return)
(ftrace_new_switch, ftrace_find_caller, ftrace_new_function)
(ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
New.
(ftrace_new_function): Move. Remove debug print.
(ftrace_update_lines, ftrace_update_insns): New.
(ftrace_update_function): Check for call, ret, and jump.
(compute_ftrace): Renamed to ...
(btrace_compute_ftrace): ...this. Rewritten to compute call
stack.
(btrace_fetch, btrace_clear): Updated.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* record-btrace.c (require_btrace): Use new btrace thread
info fields.
(record_btrace_info, btrace_insn_history)
(record_btrace_insn_history, record_btrace_insn_history_range):
Use new btrace thread info fields and new iterator.
(btrace_func_history_src_line): Rename to ...
(btrace_call_history_src_line): ...this. Use new btrace
thread info fields.
(btrace_func_history): Rename to ...
(btrace_call_history): ...this. Use new btrace thread info
fields and new iterator.
(record_btrace_call_history, record_btrace_call_history_range):
Use new btrace thread info fields and new iterator.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected function
trace.
* gdb.btrace/instruction_history.exp: Initialize traced.
Remove traced_functions.
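As a rough sketch of the graph node described above (field names here are
illustrative and do not match the actual struct btrace_function declaration
in btrace.h):

#include <cstdint>
#include <vector>

struct func_segment
{
  /* Control-flow edges: the segments executed immediately before and
     after this one.  */
  func_segment *flow_prev = nullptr;
  func_segment *flow_next = nullptr;

  /* Same-function edges: the previous and next segment of the same
     function instance, e.g. on either side of a call.  */
  func_segment *segment_prev = nullptr;
  func_segment *segment_next = nullptr;

  /* Call-stack edge: the segment of the calling function, if known.  */
  func_segment *up = nullptr;

  /* Instruction addresses belonging to this segment, plus the number of
     the first instruction, for numbering and navigation.  */
  std::vector<std::uint64_t> insns;
  unsigned int insn_offset = 1;

  /* Call-stack depth relative to a global level offset.  */
  int level = 0;
};

With such a node, the instruction history is a walk along the control-flow
edges, while "backtrace" in replay mode follows the call-stack edges.
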

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace,
                           std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  blk = btrace->blocks->size ();

  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  while (blk != 0)
    {
      CORE_ADDR pc;

      blk -= 1;

      const btrace_block &block = btrace->blocks->at (blk);
      pc = block.begin;

      for (;;)
        {
          struct btrace_function *bfun;
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block.end < pc)
2013-03-22 21:32:47 +08:00
|
|
|
{
/* Indicate the gap in the trace. */
bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);
warning (_("Recorded trace may be corrupted at instruction "
|
2017-05-30 18:47:37 +08:00
|
|
|
"%u (pc = %s)."), bfun->insn_offset - 1,
|
2016-01-18 23:59:21 +08:00
|
|
|
core_addr_to_string_nz (pc));
break;
}
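/* Update the current function segment for the instruction at PC.  This may
   start a new segment, e.g. after a call or return or when control moves
   to a different function.  */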
bfun = ftrace_update_function (btinfo, pc);
/* Maintain the function level offset.
   For all but the last block, we do it here.  */
if (blk != 0)
level = std::min (level, bfun->level);
size = 0;
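/* Ask the disassembler for the size of the instruction at PC.  This can
   fail, e.g. if the memory at PC cannot be read; SIZE then stays zero and
   a gap is recorded below.  */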
try
{
size = gdb_insn_length (gdbarch, pc);
}
catch (const gdb_exception_error &error)
{
}
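/* Fill in the instruction record and append it to the current function
   segment.  */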
insn.pc = pc;
insn.size = size;
insn.iclass = ftrace_classify_insn (gdbarch, pc);
insn.flags = 0;
ftrace_update_insns (bfun, insn);
/* We're done once we pushed the instruction at the end. */
if (block.end == pc)
break;
/* We can't continue if we fail to compute the size. */
if (size <= 0)
{
/* Indicate the gap in the trace.  We just added INSN so we're
   not at the beginning.  */
bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);
warning (_("Recorded trace may be incomplete at instruction %u "
|
2017-05-30 18:47:37 +08:00
|
|
|
"(pc = %s)."), bfun->insn_offset - 1,
|
2016-01-12 17:44:37 +08:00
|
|
|
core_addr_to_string_nz (pc));
break;
}
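/* Advance to the next instruction.  */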
pc += size;
/* Maintain the function level offset.
   For the last block, we do it here to not consider the last
   instruction.
   Since the last instruction corresponds to the current instruction
   and is not really part of the execution history, it shouldn't
   affect the level.  */
if (blk == 0)
level = std::min (level, bfun->level);
}
}
/* LEVEL is the minimal function level of all btrace function segments.
   Define the global level offset to -LEVEL so all function levels are
   normalized to start at zero.  */
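/* For example, if the trace begins inside a nested call and only returns
   are seen, the segments might get levels 0, -1, and -2; LEVEL is then -2,
   the offset becomes 2, and the normalized levels are 2, 1, and 0.  */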
|
|
|
|
btinfo->level = -level;
|
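  /* For example, if the deepest function segment discovered so far has
     level -2, LEVEL is -2 and the offset stored above is +2, so the
     function levels presented to the user start at zero.  */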
}

btrace: support Intel(R) Processor Trace
Adds a new command "record btrace pt" to configure the kernel to use
Intel(R) Processor Trace instead of Branch Trace Store.
The "record btrace" command chooses the tracing format automatically.
Intel(R) Processor Trace support requires Linux 4.1 and libipt.
gdb/
* NEWS: Announce new commands "record btrace pt" and "record pt".
Announce new options "set|show record btrace pt buffer-size".
* btrace.c: Include "rsp-low.h".
Include "inttypes.h".
(btrace_add_pc): Add forward declaration.
(pt_reclassify_insn, ftrace_add_pt, btrace_pt_readmem_callback)
(pt_translate_cpu_vendor, btrace_finalize_ftrace_pt)
(btrace_compute_ftrace_pt): New.
(btrace_compute_ftrace): Support BTRACE_FORMAT_PT.
(check_xml_btrace_version): Update version check.
(parse_xml_raw, parse_xml_btrace_pt_config_cpu)
(parse_xml_btrace_pt_raw, parse_xml_btrace_pt)
(btrace_pt_config_cpu_attributes, btrace_pt_config_children)
(btrace_pt_children): New.
(btrace_children): Add support for "pt".
(parse_xml_btrace_conf_pt, btrace_conf_pt_attributes): New.
(btrace_conf_children): Add support for "pt".
* btrace.h: Include "intel-pt.h".
(btrace_pt_error): New.
* common/btrace-common.c (btrace_format_string, btrace_data_fini)
(btrace_data_empty): Support BTRACE_FORMAT_PT.
* common/btrace-common.h (btrace_format): Add BTRACE_FORMAT_PT.
(struct btrace_config_pt): New.
(struct btrace_config)<pt>: New.
(struct btrace_data_pt_config, struct btrace_data_pt): New.
(struct btrace_data)<pt>: New.
* features/btrace-conf.dtd (btrace-conf)<pt>: New.
(pt): New.
* features/btrace.dtd (btrace)<pt>: New.
(pt, pt-config, cpu): New.
* nat/linux-btrace.c (perf_event_read, perf_event_read_all)
(perf_event_pt_event_type, kernel_supports_pt)
(linux_supports_pt): New.
(linux_supports_btrace): Support BTRACE_FORMAT_PT.
(linux_enable_bts): Free tinfo on error.
(linux_enable_pt): New.
(linux_enable_btrace): Support BTRACE_FORMAT_PT.
(linux_disable_pt): New.
(linux_disable_btrace): Support BTRACE_FORMAT_PT.
(linux_fill_btrace_pt_config, linux_read_pt): New.
(linux_read_btrace): Support BTRACE_FORMAT_PT.
* nat/linux-btrace.h (struct btrace_tinfo_pt): New.
(struct btrace_target_info)<pt>: New.
* record-btrace.c (set_record_btrace_pt_cmdlist)
(show_record_btrace_pt_cmdlist): New.
(record_btrace_print_pt_conf): New.
(record_btrace_print_conf): Support BTRACE_FORMAT_PT.
(btrace_ui_out_decode_error): Support BTRACE_FORMAT_PT.
(cmd_record_btrace_pt_start): New.
(cmd_record_btrace_start): Support BTRACE_FORMAT_PT.
(cmd_set_record_btrace_pt, cmd_show_record_btrace_pt): New.
(_initialize_record_btrace): Add new commands.
* remote.c (PACKET_Qbtrace_pt, PACKET_Qbtrace_conf_pt_size): New.
(remote_protocol_features): Add "Qbtrace:pt".
Add "Qbtrace-conf:pt:size".
(remote_supports_btrace): Support BTRACE_FORMAT_PT.
(btrace_sync_conf): Support PACKET_Qbtrace_conf_pt_size.
(remote_enable_btrace): Support BTRACE_FORMAT_PT.
(_initialize_remote): Add new commands.
gdbserver/
* linux-low.c: Include "rsp-low.h"
(linux_low_encode_pt_config, linux_low_encode_raw): New.
(linux_low_read_btrace): Support BTRACE_FORMAT_PT.
(linux_low_btrace_conf): Support BTRACE_FORMAT_PT.
(handle_btrace_enable_pt): New.
(handle_btrace_general_set): Support "pt".
(handle_btrace_conf_general_set): Support "pt:size".
doc/
* gdb.texinfo (Process Record and Replay): Spell out that variables
and registers are not available during btrace replay.
Describe the new "record btrace pt" command.
Describe the new "set|show record btrace pt buffer-size" options.
(General Query Packets): Describe the new Qbtrace:pt and
Qbtrace-conf:pt:size packets.
Expand "bts" to "Branch Trace Store".
Update the branch trace DTD.
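As a usage sketch (not part of the change itself; the buffer size below is
only an example value), the new commands would typically be used like this:

  (gdb) set record btrace pt buffer-size 65536
  (gdb) record btrace pt
  (gdb) show record btrace pt buffer-size
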
#if defined (HAVE_LIBIPT)

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
  btrace_insn_flags flags = 0;

  if (insn.speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Return the btrace instruction for INSN.  */

static btrace_insn
pt_btrace_insn (const struct pt_insn &insn)
{
  return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
          pt_reclassify_insn (insn.iclass),
          pt_btrace_insn_flags (insn)};
}

/* Handle instruction decode events (libipt-v2).  */

static int
handle_pt_insn_events (struct btrace_thread_info *btinfo,
                       struct pt_insn_decoder *decoder,
                       std::vector<unsigned int> &gaps, int status)
{
#if defined (HAVE_PT_INSN_EVENT)
  while (status & pts_event_pending)
    {
      struct btrace_function *bfun;
      struct pt_event event;
      uint64_t offset;

      status = pt_insn_event (decoder, &event, sizeof (event));
      if (status < 0)
        break;

      switch (event.type)
        {
        default:
          break;

        case ptev_enabled:
          if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
            {
              bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

              pt_insn_get_offset (decoder, &offset);

              warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
                         PRIx64 ")."), bfun->insn_offset - 1, offset);
            }

          break;

        case ptev_overflow:
          bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

          pt_insn_get_offset (decoder, &offset);

          warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
                   bfun->insn_offset - 1, offset);

          break;
        }
    }
#endif /* defined (HAVE_PT_INSN_EVENT) */

  return status;
}

/* Handle events indicated by flags in INSN (libipt-v1).  */

static void
handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
                            struct pt_insn_decoder *decoder,
                            const struct pt_insn &insn,
                            std::vector<unsigned int> &gaps)
{
#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
  /* Tracing is disabled and re-enabled each time we enter the kernel.  Most
     times, we continue from the same instruction we stopped before.  This is
     indicated via the RESUMED instruction flag.  The ENABLED instruction flag
     means that we continued from some other instruction.  Indicate this as a
     trace gap except when tracing just started.  */
  if (insn.enabled && !btinfo->functions.empty ())
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
                 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
               insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */

#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
  /* Indicate trace overflows.  */
  if (insn.resynced)
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
                 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
}

/* Add function branch trace to BTINFO using DECODER.  */

static void
ftrace_add_pt (struct btrace_thread_info *btinfo,
               struct pt_insn_decoder *decoder,
               int *plevel,
               std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;
  uint64_t offset;
  int status;

  for (;;)
    {
      struct pt_insn insn;

      status = pt_insn_sync_forward (decoder);
      if (status < 0)
        {
          if (status != -pte_eos)
            warning (_("Failed to synchronize onto the Intel Processor "
                       "Trace stream: %s."), pt_errstr (pt_errcode (status)));
          break;
        }

      for (;;)
        {
          /* Handle events from the previous iteration or synchronization.  */
          status = handle_pt_insn_events (btinfo, decoder, gaps, status);
          if (status < 0)
            break;

          status = pt_insn_next (decoder, &insn, sizeof (insn));
          if (status < 0)
            break;

          /* Handle events indicated by flags in INSN.  */
          handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);

          bfun = ftrace_update_function (btinfo, insn.ip);

          /* Maintain the function level offset.  */
          *plevel = std::min (*plevel, bfun->level);

          ftrace_update_insns (bfun, pt_btrace_insn (insn));
        }

      if (status == -pte_eos)
        break;

      /* Indicate the gap in the trace.  */
      bfun = ftrace_new_gap (btinfo, status, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
                 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
               offset, insn.ip, pt_errstr (pt_errcode (status)));
    }
2014-01-24 20:45:47 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* A callback function to allow the trace decoder to read the inferior's
|
|
|
|
memory. */
|
|
|
|
|
|
|
|
static int
|
|
|
|
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
|
btrace: fix build fail with 32-bit BFD
When compiling GDB with 32-bit BFD, the build fails with:
In file included from btrace.h:33:0,
                 from btrace.c:23:
/usr/include/intel-pt.h:1643:51: note: expected
  'int (*)(uint8_t *, size_t, const struct pt_asid *, uint64_t, void *)'
  but argument is of type
  'int (*)(gdb_byte *, size_t, const struct pt_asid *, CORE_ADDR, void *)'
 extern pt_export int pt_image_set_callback(struct pt_image *image, ^
gdb/
* btrace.c (btrace_pt_readmem_callback): Change type of PC argument.
2015-07-07 19:54:34 +08:00
|
|
|
const struct pt_asid *asid, uint64_t pc,
|
2014-01-24 20:45:47 +08:00
|
|
|
void *context)
|
|
|
|
{
|
2015-12-23 20:53:53 +08:00
|
|
|
int result, errcode;
|
2014-01-24 20:45:47 +08:00
|
|
|
|
2015-12-23 20:53:53 +08:00
|
|
|
result = (int) size;
|
2019-04-04 06:02:42 +08:00
|
|
|
try
|
2014-01-24 20:45:47 +08:00
|
|
|
{
|
2015-07-07 19:54:34 +08:00
|
|
|
errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
|
2014-01-24 20:45:47 +08:00
|
|
|
if (errcode != 0)
|
2015-12-23 20:53:53 +08:00
|
|
|
result = -pte_nomap;
|
2014-01-24 20:45:47 +08:00
|
|
|
}
|
2019-04-04 05:59:07 +08:00
|
|
|
catch (const gdb_exception_error &error)
|
2014-01-24 20:45:47 +08:00
|
|
|
{
|
2015-12-23 20:53:53 +08:00
|
|
|
result = -pte_nomap;
|
2014-01-24 20:45:47 +08:00
|
|
|
}
|
|
|
|
|
2015-12-23 20:53:53 +08:00
|
|
|
return result;
|
2014-01-24 20:45:47 +08:00
|
|
|
}
|
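/* Illustrative sketch (hedged, not part of this file): a minimal example of
   how a read-memory callback of this shape is typically hooked into a libipt
   decoder's image.  The helper name btrace_pt_attach_readmem is made up;
   only the pt_image_set_callback prototype quoted in the 32-bit BFD
   build-fix note above is taken as given.  */

static int
btrace_pt_attach_readmem (struct pt_insn_decoder *decoder)
{
  struct pt_image *image;

  /* The image is where the decoder looks up instruction bytes.  */
  image = pt_insn_get_image (decoder);
  if (image == NULL)
    return -pte_internal;

  /* Route all reads through GDB.  The CONTEXT argument is unused by
     btrace_pt_readmem_callback, so pass NULL.  */
  return pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
}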
|
|
|
|
|
|
|
/* Translate the vendor from one enum to another. */
|
|
|
|
|
|
|
|
static enum pt_cpu_vendor
|
|
|
|
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
|
|
|
|
{
|
|
|
|
switch (vendor)
|
|
|
|
{
|
|
|
|
default:
|
|
|
|
return pcv_unknown;
|
|
|
|
|
|
|
|
case CV_INTEL:
|
|
|
|
return pcv_intel;
|
|
|
|
}
|
|
|
|
}
|
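/* Illustrative sketch (hypothetical helper, not part of this file): the
   translated vendor feeds the libipt CPU identification in struct pt_config,
   from which pt_cpu_errata can derive decoder workarounds.  Only the libipt
   pt_config/pt_cpu_errata API is assumed here; the file's own configuration
   code follows further below.  */

static int
btrace_pt_apply_cpu (struct pt_config *config,
                     const struct btrace_data_pt *btrace)
{
  /* An unknown vendor is treated as "no errata".  */
  if (btrace->config.cpu.vendor == CV_UNKNOWN)
    return 0;

  config->cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config->cpu.family = btrace->config.cpu.family;
  config->cpu.model = btrace->config.cpu.model;
  config->cpu.stepping = btrace->config.cpu.stepping;

  /* Ask libipt to enable the workarounds matching this CPU.  */
  return pt_cpu_errata (&config->errata, &config->cpu);
}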
|
|
|
|
|
|
|
/* Finalize the function branch trace after decode. */
|
|
|
|
|
|
|
|
static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
|
|
|
|
struct thread_info *tp, int level)
|
|
|
|
{
|
|
|
|
pt_insn_free_decoder (decoder);
|
|
|
|
|
|
|
|
/* LEVEL is the minimal function level of all btrace function segments.
|
|
|
|
Define the global level offset to -LEVEL so all function levels are
|
|
|
|
normalized to start at zero. */
|
|
|
|
tp->btrace.level = -level;
|
|
|
|
|
|
|
|
/* Add a single last instruction entry for the current PC.
|
|
|
|
This allows us to compute the backtrace at the current PC using both
|
|
|
|
standard unwind and btrace unwind.
|
|
|
|
This extra entry is ignored by all record commands. */
|
|
|
|
btrace_add_pc (tp);
|
|
|
|
}
|
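/* Worked example of the normalization above (made-up numbers): if decoding
   produced function segments at levels -2, -1, 0 and 1, then LEVEL, the
   minimum, is -2.  Storing tp->btrace.level = -LEVEL = 2 as the global
   offset presents the segment recorded at level -2 at -2 + 2 == 0 and the
   one recorded at level 1 at 1 + 2 == 3, so presented levels start at
   zero.  */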
|
|
|
|
2016-01-12 23:03:11 +08:00
|
|
|
/* Compute the function branch trace from Intel Processor Trace
|
|
|
|
format. */
|
2014-01-24 20:45:47 +08:00
|
|
|
|
|
|
|
static void
|
|
|
|
btrace_compute_ftrace_pt (struct thread_info *tp,
|
btrace: bridge gaps
Most of the time, the trace should be in one piece. This case is handled fine
by GDB. In some cases, however, there may be gaps in the trace. They result
from trace decode errors or from overflows.
A gap in the trace means we lost an unknown amount of trace. Gaps can be very
small, such as a few instructions in the same function, or they can be rather
big. We may, for example, lose a few function calls or returns. The trace may
continue in a different function and we likely don't know how we got there.
Even though we can't say how the program executed across a gap, higher levels
may not be impacted too much by it. Let's assume we have functions a-e and a
trace that looks roughly like this:
 a
  \
   b                b
    \              /
     c   <gap>    c
                 /
      d         d
       \       /
        e
Even though we can't say for sure, it is likely that b and c are the same
function instance before and after the gap. This patch is trying to connect
the c and b function segments across the gap.
This will add a to the back trace of b on the right hand side. The changes are
reflected in GDB's internal representation of the trace and will improve:
- the output of "record function-call-history /c"
- the output of "backtrace" in replay mode
- source stepping in replay mode
  will be improved indirectly via the improved back trace
I don't have an automated test for this patch; decode errors will be fixed and
overflows occur sporadically and are quite rare. I tested it by hacking GDB to
provoke a decode error and on the expected gap in the gdb.btrace/dlopen.exp
test.
The issue is that we can't predict where we will be able to re-sync in case of
errors. For the expected decode error in gdb.btrace/dlopen.exp, for example, we
may be able to re-sync somewhere in dlclose, in test, in main, or not at all.
Here's one example run of gdb.btrace/dlopen.exp with and without this patch.
(gdb) info record
Active record target: record-btrace
Recording format: Intel Processor Trace.
Buffer size: 16kB.
warning: Non-contiguous trace at instruction 66608 (offset = 0xa83, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66652 (offset = 0xa9b, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66770 (offset = 0xacb, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66966 (offset = 0xb60, pc = 0xb7ff5ee4).
warning: Non-contiguous trace at instruction 66994 (offset = 0xb74, pc = 0xb7ff5f24).
warning: Non-contiguous trace at instruction 67334 (offset = 0xbac, pc = 0xb7ff5e6d).
warning: Non-contiguous trace at instruction 69022 (offset = 0xc04, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69116 (offset = 0xc1c, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69504 (offset = 0xc74, pc = 0xb7ff605d).
warning: Non-contiguous trace at instruction 83648 (offset = 0xecc, pc = 0xb7ff6134).
warning: Decode error (-13) at instruction 83876 (offset = 0xf48, pc = 0xb7fd6380): no memory mapped at this address.
warning: Non-contiguous trace at instruction 83876 (offset = 0x11b7, pc = 0xb7ff1c70).
Recorded 83948 instructions in 912 functions (12 gaps) for thread 1 (process 12996).
(gdb) record instruction-history 83876, +2
83876 => 0xb7fec46f <call_init.part.0+95>: call *%eax
[decode error (-13): no memory mapped at this address]
[disabled]
83877 0xb7ff1c70 <_dl_close_worker.part.0+1584>: nop
Without the patch, the trace is disconnected and the backtrace is short:
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
Backtrace stopped: not enough registers or memory available to unwind further
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
With the patch, GDB is able to connect the trace pieces and we get a full
backtrace.
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7ff02e2 in _dl_open () from /lib/ld-linux.so.2
#5 0xb7fc3c65 in dlopen_doit () from /lib/libdl.so.2
#6 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#7 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#8 0xb7fc3d0e in dlopen@@GLIBC_2.1 () from /lib/libdl.so.2
#9 0xb7ff28ee in _dl_runtime_resolve () from /lib/ld-linux.so.2
#10 0x0804841c in ?? ()
#11 0x08048470 in dlopen@plt ()
#12 0x080485a3 in test ()
#13 0x08048628 in main ()
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
It worked nicely in this case but it may, of course, also lead to weird
connections; it is a heuristic, after all.
It works best when the gap is small and the trace pieces are long.
gdb/
* btrace.c (bfun_s): New typedef.
(ftrace_update_caller): Print caller in debug dump.
(ftrace_get_caller, ftrace_match_backtrace, ftrace_fixup_level)
(ftrace_compute_global_level_offset, ftrace_connect_bfun)
(ftrace_connect_backtrace, ftrace_bridge_gap, btrace_bridge_gaps): New.
(btrace_compute_ftrace_bts): Pass vector of gaps. Collect gaps.
(btrace_compute_ftrace_pt): Likewise.
(btrace_compute_ftrace): Split into this, ...
(btrace_compute_ftrace_1): ... this, and ...
(btrace_finalize_ftrace): ... this. Call btrace_bridge_gaps.
2016-01-21 22:02:27 +08:00
|
|
|
const struct btrace_data_pt *btrace,
|
2017-05-30 18:47:37 +08:00
|
|
|
std::vector<unsigned int> &gaps)
|
2014-01-24 20:45:47 +08:00
|
|
|
{
|
|
|
|
struct btrace_thread_info *btinfo;
|
|
|
|
struct pt_insn_decoder *decoder;
|
|
|
|
struct pt_config config;
|
|
|
|
int level, errcode;
|
|
|
|
|
|
|
|
if (btrace->size == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
btinfo = &tp->btrace;
|
2017-05-30 18:47:37 +08:00
|
|
|
if (btinfo->functions.empty ())
|
|
|
|
level = INT_MAX;
|
|
|
|
else
|
|
|
|
level = -btinfo->level;
|
  pt_config_init(&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  /* We treat an unknown vendor as 'no errata'.  */
  if (btrace->config.cpu.vendor != CV_UNKNOWN)
    {
      config.cpu.vendor
        = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
      config.cpu.family = btrace->config.cpu.family;
      config.cpu.model = btrace->config.cpu.model;
      config.cpu.stepping = btrace->config.cpu.stepping;
      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace "
                 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }
  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));
  try
    {
      struct pt_image *image;

      image = pt_insn_get_image(decoder);
      if (image == NULL)
        error (_("Failed to configure the Intel Processor Trace decoder."));
      errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace decoder: "
                 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (btinfo, decoder, &level, gaps);
    }
  catch (const gdb_exception &error)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
        ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);

      btrace_finalize_ftrace_pt (decoder, tp, level);
Replace throw_exception with throw in some cases
This replaces throw_exception with "throw;" when possible. This was
written by script. The rule that is followed is that uses of the
form:
catch (... &name)
{
...
throw_exception (name);
}
... can be rewritten. This should always be safe, because exceptions
are caught by const reference, and therefore can't be modified in the
body of the catch.
gdb/ChangeLog
2019-04-08 Tom Tromey <tom@tromey.com>
* valops.c (value_rtti_indirect_type): Replace throw_exception
with throw.
* tracefile-tfile.c (tfile_target_open): Replace throw_exception
with throw.
* thread.c (thr_try_catch_cmd): Replace throw_exception with
throw.
* target.c (target_translate_tls_address): Replace throw_exception
with throw.
* stack.c (frame_apply_command_count): Replace throw_exception
with throw.
* solib-spu.c (append_ocl_sos): Replace throw_exception with
throw.
* s390-tdep.c (s390_frame_unwind_cache): Replace throw_exception
with throw.
* rs6000-tdep.c (rs6000_frame_cache)
(rs6000_epilogue_frame_cache): Replace throw_exception with throw.
* remote.c: Replace throw_exception with throw.
* record-full.c (record_full_message, record_full_wait_1)
(record_full_restore): Replace throw_exception with throw.
* record-btrace.c:
(get_thread_current_frame_id, record_btrace_start_replaying)
(cmd_record_btrace_bts_start, cmd_record_btrace_pt_start)
(cmd_record_btrace_start): Replace throw_exception with throw.
* parse.c (parse_exp_in_context_1): Replace throw_exception with
throw.
* linux-nat.c (detach_one_lwp, linux_resume_one_lwp)
(resume_stopped_resumed_lwps): Replace throw_exception with throw.
* linespec.c:
(find_linespec_symbols): Replace throw_exception with throw.
* infrun.c (displaced_step_prepare, resume): Replace
throw_exception with throw.
* infcmd.c (post_create_inferior): Replace throw_exception with
throw.
* inf-loop.c (inferior_event_handler): Replace throw_exception
with throw.
* i386-tdep.c (i386_frame_cache, i386_epilogue_frame_cache)
(i386_sigtramp_frame_cache): Replace throw_exception with throw.
* frame.c (frame_unwind_pc, get_prev_frame_if_no_cycle)
(get_prev_frame_always, get_frame_pc_if_available)
(get_frame_address_in_block_if_available, get_frame_language):
Replace throw_exception with throw.
* frame-unwind.c (frame_unwind_try_unwinder): Replace
throw_exception with throw.
* eval.c (fetch_subexp_value, evaluate_var_value)
(evaluate_funcall, evaluate_subexp_standard): Replace
throw_exception with throw.
* dwarf2loc.c (call_site_find_chain)
(dwarf2_evaluate_loc_desc_full, dwarf2_locexpr_baton_eval):
Replace throw_exception with throw.
* dwarf2-frame.c (dwarf2_frame_cache): Replace throw_exception
with throw.
* darwin-nat.c (darwin_attach_pid): Replace throw_exception with
throw.
* cp-abi.c (baseclass_offset): Replace throw_exception with throw.
* completer.c (complete_line_internal): Replace throw_exception
with throw.
* compile/compile-object-run.c (compile_object_run): Replace
throw_exception with throw.
* cli/cli-script.c (process_next_line): Replace throw_exception
with throw.
* btrace.c (btrace_compute_ftrace_pt, btrace_compute_ftrace)
(btrace_enable, btrace_maint_update_pt_packets): Replace
throw_exception with throw.
* breakpoint.c (create_breakpoint, save_breakpoints): Replace
throw_exception with throw.
* break-catch-throw.c (re_set_exception_catchpoint): Replace
throw_exception with throw.
* amd64-tdep.c (amd64_frame_cache, amd64_sigtramp_frame_cache)
(amd64_epilogue_frame_cache): Replace throw_exception with throw.
* aarch64-tdep.c (aarch64_make_prologue_cache)
(aarch64_make_stub_cache): Replace throw_exception with throw.
gdb/gdbserver/ChangeLog
2019-04-08 Tom Tromey <tom@tromey.com>
* linux-low.c (linux_detach_one_lwp): Replace throw_exception with
throw.
(linux_resume_one_lwp): Likewise.
      throw;
    }

  btrace_finalize_ftrace_pt (decoder, tp, level);
}
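For reference, the read-memory callback installed above via pt_image_set_callback follows libipt's callback convention. Below is a minimal sketch under that assumption; the function name and body are illustrative and simplified, not GDB's actual btrace_pt_readmem_callback, which also guards against errors raised by the memory read.

/* Sketch of a libipt read-memory callback: copy SIZE bytes of target
   memory at PC into BUFFER and return the number of bytes provided, or a
   negative pt_error_code value on failure.  Illustrative only.  */

static int
readmem_callback_sketch (gdb_byte *buffer, size_t size,
                         const struct pt_asid *asid, uint64_t pc,
                         void *context)
{
  if (target_read_memory ((CORE_ADDR) pc, buffer, size) != 0)
    return -pte_nomap;

  return (int) size;
}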
#else /* defined (HAVE_LIBIPT) */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
btrace: bridge gaps
Most of the time, the trace should be in one piece. This case is handled fine
by GDB. In some cases, however, there may be gaps in the trace. They result
from trace decode errors or from overflows.
A gap in the trace means we lost an unknown amount of trace. Gaps can be very
small, such as a few instructions in the same function, or they can be rather
big. We may, for example, lose a few function calls or returns. The trace may
continue in a different function and we likely don't know how we got there.
Even though we can't say how the program executed across a gap, higher levels
may not be impacted too much by it. Let's assume we have functions a-e and a
trace that looks roughly like this:
a
\
b b
\ /
c <gap> c
/
d d
\ /
e
Even though we can't say for sure, it is likely that b and c are the same
function instance before and after the gap. This patch is trying to connect
the c and b function segments across the gap.
This will add a to the back trace of b on the right hand side. The changes are
reflected in GDB's internal representation of the trace and will improve:
- the output of "record function-call-history /c"
- the output of "backtrace" in replay mode
- source stepping in replay mode
will be improved indirectly via the improved back trace
I don't have an automated test for this patch; decode errors will be fixed and
overflows occur sporadically and are quite rare. I tested it by hacking GDB to
provoke a decode error and on the expected gap in the gdb.btrace/dlopen.exp
test.
The issue is that we can't predict where we will be able to re-sync in case of
errors. For the expected decode error in gdb.btrace/dlopen.exp, for example, we
may be able to re-sync somewhere in dlclose, in test, in main, or not at all.
Here's one example run of gdb.btrace/dlopen.exp with and without this patch.
(gdb) info record
Active record target: record-btrace
Recording format: Intel Processor Trace.
Buffer size: 16kB.
warning: Non-contiguous trace at instruction 66608 (offset = 0xa83, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66652 (offset = 0xa9b, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66770 (offset = 0xacb, pc = 0xb7fdcc31).
warning: Non-contiguous trace at instruction 66966 (offset = 0xb60, pc = 0xb7ff5ee4).
warning: Non-contiguous trace at instruction 66994 (offset = 0xb74, pc = 0xb7ff5f24).
warning: Non-contiguous trace at instruction 67334 (offset = 0xbac, pc = 0xb7ff5e6d).
warning: Non-contiguous trace at instruction 69022 (offset = 0xc04, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69116 (offset = 0xc1c, pc = 0xb7ff60b3).
warning: Non-contiguous trace at instruction 69504 (offset = 0xc74, pc = 0xb7ff605d).
warning: Non-contiguous trace at instruction 83648 (offset = 0xecc, pc = 0xb7ff6134).
warning: Decode error (-13) at instruction 83876 (offset = 0xf48, pc = 0xb7fd6380): no memory mapped at this address.
warning: Non-contiguous trace at instruction 83876 (offset = 0x11b7, pc = 0xb7ff1c70).
Recorded 83948 instructions in 912 functions (12 gaps) for thread 1 (process 12996).
(gdb) record instruction-history 83876, +2
83876 => 0xb7fec46f <call_init.part.0+95>: call *%eax
[decode error (-13): no memory mapped at this address]
[disabled]
83877 0xb7ff1c70 <_dl_close_worker.part.0+1584>: nop
Without the patch, the trace is disconnected and the backtrace is short:
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
Backtrace stopped: not enough registers or memory available to unwind further
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
With the patch, GDB is able to connect the trace pieces and we get a full
backtrace.
(gdb) record goto 83876
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7fec46f in call_init.part () from /lib/ld-linux.so.2
#1 0xb7fec5d0 in _dl_init () from /lib/ld-linux.so.2
#2 0xb7ff0fe3 in dl_open_worker () from /lib/ld-linux.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7ff02e2 in _dl_open () from /lib/ld-linux.so.2
#5 0xb7fc3c65 in dlopen_doit () from /lib/libdl.so.2
#6 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#7 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#8 0xb7fc3d0e in dlopen@@GLIBC_2.1 () from /lib/libdl.so.2
#9 0xb7ff28ee in _dl_runtime_resolve () from /lib/ld-linux.so.2
#10 0x0804841c in ?? ()
#11 0x08048470 in dlopen@plt ()
#12 0x080485a3 in test ()
#13 0x08048628 in main ()
(gdb) record goto 83877
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
(gdb) backtrace
#0 0xb7ff1c70 in _dl_close_worker.part.0 () from /lib/ld-linux.so.2
#1 0xb7ff287a in _dl_close () from /lib/ld-linux.so.2
#2 0xb7fc3d5d in dlclose_doit () from /lib/libdl.so.2
#3 0xb7fec354 in _dl_catch_error () from /lib/ld-linux.so.2
#4 0xb7fc43dd in _dlerror_run () from /lib/libdl.so.2
#5 0xb7fc3d98 in dlclose () from /lib/libdl.so.2
#6 0x0804860a in test ()
#7 0x08048628 in main ()
It worked nicely in this case but it may, of course, also lead to weird
connections; it is a heuristic, after all.
It works best when the gap is small and the trace pieces are long.
gdb/
* btrace.c (bfun_s): New typedef.
(ftrace_update_caller): Print caller in debug dump.
(ftrace_get_caller, ftrace_match_backtrace, ftrace_fixup_level)
(ftrace_compute_global_level_offset, ftrace_connect_bfun)
(ftrace_connect_backtrace, ftrace_bridge_gap, btrace_bridge_gaps): New.
(btrace_compute_ftrace_bts): Pass vector of gaps. Collect gaps.
(btrace_compute_ftrace_pt): Likewise.
(btrace_compute_ftrace): Split into this, ...
(btrace_compute_ftrace_1): ... this, and ...
(btrace_finalize_ftrace): ... this. Call btrace_bridge_gaps.
                          const struct btrace_data_pt *btrace,
                          std::vector<unsigned int> &gaps)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */
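The gap-bridging heuristic described in the "btrace: bridge gaps" log entry above compares the caller chains on both sides of a gap and connects the pair of function segments whose back traces agree the most. A minimal sketch of that matching step follows, using plain strings instead of GDB's function-segment objects; the name and types are illustrative, not GDB's ftrace_match_backtrace.

/* Count how many callers, starting from the innermost frame, have the same
   function name in both back traces.  A larger count makes it more likely
   that the two trace pieces belong to the same call context.
   Assumes <string> and <vector>.  Illustrative only.  */

static int
match_backtrace_sketch (const std::vector<std::string> &lhs,
                        const std::vector<std::string> &rhs)
{
  int matches = 0;

  while (matches < (int) lhs.size () && matches < (int) rhs.size ()
         && lhs[matches] == rhs[matches])
    matches += 1;

  return matches;
}

When bridging, the candidate segments on each side of the gap would be compared pairwise with such a function and the best-matching pair connected; GDB additionally fixes up the indentation levels of the connected segments.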
/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  If CPU is not NULL, overwrite the cpu in the
   branch trace configuration.  This is currently only used for the PT
   format.  */
static void
btrace_compute_ftrace_1 (struct thread_info *tp,
                         struct btrace_data *btrace,
                         const struct btrace_cpu *cpu,
                         std::vector<unsigned int> &gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
(btrace_compute_ftrace_pt): Likewise.
(btrace_compute_ftrace): Split into this, ...
(btrace_compute_ftrace_1): ... this, and ...
(btrace_finalize_ftrace): ... this. Call btrace_bridge_gaps.
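In essence, the gap bridging compares the call stacks on both sides of a gap and connects the pair of function segments whose backtraces agree on the most caller levels. The following is a minimal sketch of that matching step only; the types and names are hypothetical stand-ins for the real ftrace_match_backtrace/ftrace_bridge_gap machinery listed in the ChangeLog above, not the actual GDB implementation.

/* Illustrative sketch only: hypothetical demo types and helpers.  */

#include <stddef.h>
#include <string.h>

struct demo_segment
{
  const char *name;             /* Function name of this segment.  */
  struct demo_segment *up;      /* Caller segment, or NULL if unknown.  */
};

/* Count how many caller levels of LHS and RHS name the same function.
   The longer the match, the more likely the two segments belong to the
   same backtrace and may be connected across a gap.  */

static int
demo_match_backtrace (const struct demo_segment *lhs,
                      const struct demo_segment *rhs)
{
  int matches = 0;

  for (; lhs != NULL && rhs != NULL; lhs = lhs->up, rhs = rhs->up)
    {
      if (strcmp (lhs->name, rhs->name) != 0)
        break;

      matches += 1;
    }

  return matches;
}

A real implementation compares function symbols rather than name strings and, once two segments are connected, also fixes up the nesting levels of the joined trace pieces; that is what the level fix-up entries in the ChangeLog above refer to.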
2016-01-21 22:02:27 +08:00
|
|
|
btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
|
2013-11-13 22:31:07 +08:00
|
|
|
return;
|
btrace: support Intel(R) Processor Trace
Adds a new command "record btrace pt" to configure the kernel to use
Intel(R) Processor Trace instead of Branch Trace Store.
The "record btrace" command chooses the tracing format automatically.
Intel(R) Processor Trace support requires Linux 4.1 and libipt.
gdb/
* NEWS: Announce new commands "record btrace pt" and "record pt".
Announce new options "set|show record btrace pt buffer-size".
* btrace.c: Include "rsp-low.h".
Include "inttypes.h".
(btrace_add_pc): Add forward declaration.
(pt_reclassify_insn, ftrace_add_pt, btrace_pt_readmem_callback)
(pt_translate_cpu_vendor, btrace_finalize_ftrace_pt)
(btrace_compute_ftrace_pt): New.
(btrace_compute_ftrace): Support BTRACE_FORMAT_PT.
(check_xml_btrace_version): Update version check.
(parse_xml_raw, parse_xml_btrace_pt_config_cpu)
(parse_xml_btrace_pt_raw, parse_xml_btrace_pt)
(btrace_pt_config_cpu_attributes, btrace_pt_config_children)
(btrace_pt_children): New.
(btrace_children): Add support for "pt".
(parse_xml_btrace_conf_pt, btrace_conf_pt_attributes): New.
(btrace_conf_children): Add support for "pt".
* btrace.h: Include "intel-pt.h".
(btrace_pt_error): New.
* common/btrace-common.c (btrace_format_string, btrace_data_fini)
(btrace_data_empty): Support BTRACE_FORMAT_PT.
* common/btrace-common.h (btrace_format): Add BTRACE_FORMAT_PT.
(struct btrace_config_pt): New.
(struct btrace_config)<pt>: New.
(struct btrace_data_pt_config, struct btrace_data_pt): New.
(struct btrace_data)<pt>: New.
* features/btrace-conf.dtd (btrace-conf)<pt>: New.
(pt): New.
* features/btrace.dtd (btrace)<pt>: New.
(pt, pt-config, cpu): New.
* nat/linux-btrace.c (perf_event_read, perf_event_read_all)
(perf_event_pt_event_type, kernel_supports_pt)
(linux_supports_pt): New.
(linux_supports_btrace): Support BTRACE_FORMAT_PT.
(linux_enable_bts): Free tinfo on error.
(linux_enable_pt): New.
(linux_enable_btrace): Support BTRACE_FORMAT_PT.
(linux_disable_pt): New.
(linux_disable_btrace): Support BTRACE_FORMAT_PT.
(linux_fill_btrace_pt_config, linux_read_pt): New.
(linux_read_btrace): Support BTRACE_FORMAT_PT.
* nat/linux-btrace.h (struct btrace_tinfo_pt): New.
(struct btrace_target_info)<pt>: New.
* record-btrace.c (set_record_btrace_pt_cmdlist)
(show_record_btrace_pt_cmdlist): New.
(record_btrace_print_pt_conf): New.
(record_btrace_print_conf): Support BTRACE_FORMAT_PT.
(btrace_ui_out_decode_error): Support BTRACE_FORMAT_PT.
(cmd_record_btrace_pt_start): New.
(cmd_record_btrace_start): Support BTRACE_FORMAT_PT.
(cmd_set_record_btrace_pt, cmd_show_record_btrace_pt): New.
(_initialize_record_btrace): Add new commands.
* remote.c (PACKET_Qbtrace_pt, PACKET_Qbtrace_conf_pt_size): New.
(remote_protocol_features): Add "Qbtrace:pt".
Add "Qbtrace-conf:pt:size".
(remote_supports_btrace): Support BTRACE_FORMAT_PT.
(btrace_sync_conf): Support PACKET_Qbtrace_conf_pt_size.
(remote_enable_btrace): Support BTRACE_FORMAT_PT.
(_initialize_remote): Add new commands.
gdbserver/
* linux-low.c: Include "rsp-low.h"
(linux_low_encode_pt_config, linux_low_encode_raw): New.
(linux_low_read_btrace): Support BTRACE_FORMAT_PT.
(linux_low_btrace_conf): Support BTRACE_FORMAT_PT.
(handle_btrace_enable_pt): New.
(handle_btrace_general_set): Support "pt".
(handle_btrace_conf_general_set): Support "pt:size".
doc/
* gdb.texinfo (Process Record and Replay): Spell out that variables
and registers are not available during btrace replay.
Describe the new "record btrace pt" command.
Describe the new "set|show record btrace pt buffer-size" options.
(General Query Packets): Describe the new Qbtrace:pt and
Qbtrace-conf:pt:size packets.
Expand "bts" to "Branch Trace Store".
Update the branch trace DTD.
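As a usage sketch of the commands announced above (the session is illustrative, not captured output; the buffer size is just an example value):
(gdb) set record btrace pt buffer-size 16384
(gdb) record btrace pt
(gdb) info record
Active record target: record-btrace
Recording format: Intel Processor Trace.
Buffer size: 16kB.
Plain "record btrace" still selects the tracing format automatically; "record btrace pt" forces Intel Processor Trace and requires Linux 4.1 and libipt, as noted above.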
2014-01-24 20:45:47 +08:00
|
|
|
|
|
|
|
case BTRACE_FORMAT_PT:
|
2018-02-02 19:29:48 +08:00
|
|
|
/* Overwrite the cpu we use for enabling errata workarounds. */
|
|
|
|
if (cpu != nullptr)
|
|
|
|
btrace->variant.pt.config.cpu = *cpu;
|
|
|
|
|
2016-01-21 22:02:27 +08:00
|
|
|
btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
|
2014-01-24 20:45:47 +08:00
|
|
|
return;
|
2013-11-13 22:31:07 +08:00
|
|
|
}
|
|
|
|
|
2020-01-17 06:41:53 +08:00
|
|
|
internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
|
2013-11-13 22:31:07 +08:00
|
|
|
}
|
|
|
|
|
2016-01-21 22:02:27 +08:00
|
|
|
static void
|
2017-05-30 18:47:37 +08:00
|
|
|
btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
|
2016-01-21 22:02:27 +08:00
|
|
|
{
|
2017-05-30 18:47:37 +08:00
|
|
|
if (!gaps.empty ())
|
2016-01-21 22:02:27 +08:00
|
|
|
{
|
2017-05-30 18:47:37 +08:00
|
|
|
tp->btrace.ngaps += gaps.size ();
|
2016-01-21 22:02:27 +08:00
|
|
|
btrace_bridge_gaps (tp, gaps);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2018-02-02 19:29:48 +08:00
|
|
|
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
|
|
|
|
const struct btrace_cpu *cpu)
|
2016-01-21 22:02:27 +08:00
|
|
|
{
|
2017-05-30 18:47:37 +08:00
|
|
|
std::vector<unsigned int> gaps;
|
2016-01-21 22:02:27 +08:00
|
|
|
|
2019-04-04 06:02:42 +08:00
|
|
|
try
|
2016-01-21 22:02:27 +08:00
|
|
|
{
|
2018-02-02 19:29:48 +08:00
|
|
|
btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
|
2016-01-21 22:02:27 +08:00
|
|
|
}
|
2019-04-04 05:59:07 +08:00
|
|
|
catch (const gdb_exception &error)
|
2016-01-21 22:02:27 +08:00
|
|
|
{
|
2017-05-30 18:47:37 +08:00
|
|
|
btrace_finalize_ftrace (tp, gaps);
|
2016-01-21 22:02:27 +08:00
|
|
|
|
Replace throw_exception with throw in some cases
This replaces throw_exception with "throw;" when possible. This was
written by script. The rule that is followed is that uses of the
form:
catch (... &name)
{
...
throw_exception (name);
}
... can be rewritten. This should always be safe, because exceptions
are caught by const reference, and therefore can't be modified in the
body of the catch.
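As a concrete illustration of that rule (a sketch only: do_something and do_cleanup are hypothetical stand-ins, and GDB's gdb_exception and throw_exception declarations are assumed to be in scope):

/* Before the change: the handler re-raises by calling throw_exception
   on the exception it caught.  */
try
  {
    do_something ();
  }
catch (const gdb_exception &ex)
  {
    do_cleanup ();
    throw_exception (ex);
  }

/* After the change: a bare throw re-raises the same exception.  Since
   EX is caught by const reference, the handler cannot have modified
   it, so the two forms behave the same.  */
try
  {
    do_something ();
  }
catch (const gdb_exception &ex)
  {
    do_cleanup ();
    throw;
  }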
gdb/ChangeLog
2019-04-08 Tom Tromey <tom@tromey.com>
* valops.c (value_rtti_indirect_type): Replace throw_exception
with throw.
* tracefile-tfile.c (tfile_target_open): Replace throw_exception
with throw.
* thread.c (thr_try_catch_cmd): Replace throw_exception with
throw.
* target.c (target_translate_tls_address): Replace throw_exception
with throw.
* stack.c (frame_apply_command_count): Replace throw_exception
with throw.
* solib-spu.c (append_ocl_sos): Replace throw_exception with
throw.
* s390-tdep.c (s390_frame_unwind_cache): Replace throw_exception
with throw.
* rs6000-tdep.c (rs6000_frame_cache)
(rs6000_epilogue_frame_cache): Replace throw_exception with throw.
* remote.c: Replace throw_exception with throw.
* record-full.c (record_full_message, record_full_wait_1)
(record_full_restore): Replace throw_exception with throw.
* record-btrace.c:
(get_thread_current_frame_id, record_btrace_start_replaying)
(cmd_record_btrace_bts_start, cmd_record_btrace_pt_start)
(cmd_record_btrace_start): Replace throw_exception with throw.
* parse.c (parse_exp_in_context_1): Replace throw_exception with
throw.
* linux-nat.c (detach_one_lwp, linux_resume_one_lwp)
(resume_stopped_resumed_lwps): Replace throw_exception with throw.
* linespec.c:
(find_linespec_symbols): Replace throw_exception with throw.
* infrun.c (displaced_step_prepare, resume): Replace
throw_exception with throw.
* infcmd.c (post_create_inferior): Replace throw_exception with
throw.
* inf-loop.c (inferior_event_handler): Replace throw_exception
with throw.
* i386-tdep.c (i386_frame_cache, i386_epilogue_frame_cache)
(i386_sigtramp_frame_cache): Replace throw_exception with throw.
* frame.c (frame_unwind_pc, get_prev_frame_if_no_cycle)
(get_prev_frame_always, get_frame_pc_if_available)
(get_frame_address_in_block_if_available, get_frame_language):
Replace throw_exception with throw.
* frame-unwind.c (frame_unwind_try_unwinder): Replace
throw_exception with throw.
* eval.c (fetch_subexp_value, evaluate_var_value)
(evaluate_funcall, evaluate_subexp_standard): Replace
throw_exception with throw.
* dwarf2loc.c (call_site_find_chain)
(dwarf2_evaluate_loc_desc_full, dwarf2_locexpr_baton_eval):
Replace throw_exception with throw.
* dwarf2-frame.c (dwarf2_frame_cache): Replace throw_exception
with throw.
* darwin-nat.c (darwin_attach_pid): Replace throw_exception with
throw.
* cp-abi.c (baseclass_offset): Replace throw_exception with throw.
* completer.c (complete_line_internal): Replace throw_exception
with throw.
* compile/compile-object-run.c (compile_object_run): Replace
throw_exception with throw.
* cli/cli-script.c (process_next_line): Replace throw_exception
with throw.
* btrace.c (btrace_compute_ftrace_pt, btrace_compute_ftrace)
(btrace_enable, btrace_maint_update_pt_packets): Replace
throw_exception with throw.
* breakpoint.c (create_breakpoint, save_breakpoints): Replace
throw_exception with throw.
* break-catch-throw.c (re_set_exception_catchpoint): Replace
throw_exception with throw.
* amd64-tdep.c (amd64_frame_cache, amd64_sigtramp_frame_cache)
(amd64_epilogue_frame_cache): Replace throw_exception with throw.
* aarch64-tdep.c (aarch64_make_prologue_cache)
(aarch64_make_stub_cache): Replace throw_exception with throw.
gdb/gdbserver/ChangeLog
2019-04-08 Tom Tromey <tom@tromey.com>
* linux-low.c (linux_detach_one_lwp): Replace throw_exception with
throw.
(linux_resume_one_lwp): Likewise.
2019-01-29 01:45:45 +08:00
|
|
|
throw;
|
2016-01-21 22:02:27 +08:00
|
|
|
}
|
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
btrace_finalize_ftrace (tp, gaps);
|
2016-01-21 22:02:27 +08:00
|
|
|
}
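Pieced together, the annotated lines above form the exception-handling core of btrace_compute_ftrace. A sketch of the enclosing function (the signature and the gaps declaration are inferred from the calls shown and may differ slightly between GDB versions):

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
                       const struct btrace_cpu *cpu)
{
  std::vector<unsigned int> gaps;

  try
    {
      btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
    }
  catch (const gdb_exception &error)
    {
      /* Bridge the gaps collected so far even if decoding failed,
         then re-raise the error.  */
      btrace_finalize_ftrace (tp, gaps);

      throw;
    }

  btrace_finalize_ftrace (tp, gaps);
}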
|
|
|
|
|
2013-09-10 18:27:14 +08:00
|
|
|
/* Add an entry for the current PC. */
|
|
|
|
|
|
|
|
static void
|
|
|
|
btrace_add_pc (struct thread_info *tp)
|
|
|
|
{
|
2013-11-13 22:31:07 +08:00
|
|
|
struct btrace_data btrace;
|
2013-09-10 18:27:14 +08:00
|
|
|
struct regcache *regcache;
|
|
|
|
CORE_ADDR pc;
|
|
|
|
|
Use thread_info and inferior pointers more throughout
This is more preparation bits for multi-target support.
In a multi-target scenario, we need to address the case of different
processes/threads running on different targets that happen to have the
same PID/PTID. E.g., we can have both process 123 in target 1, and
process 123 in target 2, while they're in reality different processes
running on different machines. Or maybe we've loaded multiple
instances of the same core file. Etc.
To address this, in my WIP multi-target branch, threads and processes
are uniquely identified by the (process_stratum target_ops *, ptid_t)
and (process_stratum target_ops *, pid) tuples respectively. I.e.,
each process_stratum instance has its own thread/process number space.
As you can imagine, that requires passing around target_ops * pointers
in a number of functions where we're currently passing only a ptid_t
or an int. E.g., when we look up a thread_info object by ptid_t in
find_thread_ptid, the ptid_t alone isn't sufficient.
In many cases though, we already have the thread_info or inferior
pointer handy, but we "lose" it somewhere along the call stack, only
to look it up again by ptid_t/pid. Since thread_info or inferior
objects know their parent target, if we pass around thread_info or
inferior pointers when possible, we avoid having to add extra
target_ops parameters to many functions, and also, we eliminate a
number of by ptid_t/int lookups.
So that's what this patch does. In a bit more detail:
- Changes a number of functions and methods to take a thread_info or
inferior pointer instead of a ptid_t or int parameter.
- Changes a number of structure fields from ptid_t/int to inferior or
thread_info pointers.
- Uses the inferior_thread() function whenever possible instead of
inferior_ptid.
- Uses thread_info pointers directly when possible instead of the
is_running/is_stopped etc. routines that require a lookup.
- A number of functions are eliminated along the way, such as:
int valid_gdb_inferior_id (int num);
int pid_to_gdb_inferior_id (int pid);
int gdb_inferior_id_to_pid (int num);
int in_inferior_list (int pid);
- A few structures and places hold a thread_info pointer across
inferior execution, so now they take a strong reference to the
(refcounted) thread_info object to avoid the thread_info pointer
getting stale. This is done in enable_thread_stack_temporaries and
in the infcall.c code.
- Related, there's a spot in infcall.c where using a RAII object to
handle the refcount would be handy, so a gdb::ref_ptr specialization
for thread_info is added (thread_info_ref, in gdbthread.h), along
with a gdb_ref_ptr policy that works for all refcounted_object types
(in common/refcounted-object.h).
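As a small illustration of the pattern behind these changes (a hypothetical function, not one of the entries below): a parameter that used to be a ptid_t, forcing the callee to look the thread up again, becomes a thread_info pointer, so the lookup disappears and the thread's inferior and target are reachable directly from the pointer.

/* Old shape: callers pass a ptid_t; the callee must re-find the thread.  */

static void
describe_thread (ptid_t ptid)
{
  thread_info *tp = find_thread_ptid (ptid);  /* extra lookup */

  printf_filtered ("%s\n", target_pid_to_str (tp->ptid).c_str ());
}

/* New shape: callers that already hold the thread_info pass it
   directly; no lookup needed.  */

static void
describe_thread (thread_info *tp)
{
  printf_filtered ("%s\n", target_pid_to_str (tp->ptid).c_str ());
}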
gdb/ChangeLog:
2018-06-21 Pedro Alves <palves@redhat.com>
* ada-lang.h (ada_get_task_number): Take a thread_info pointer
instead of a ptid_t. All callers adjusted.
* ada-tasks.c (ada_get_task_number): Likewise. All callers
adjusted.
(print_ada_task_info, display_current_task_id, task_command_1):
Adjust.
* breakpoint.c (watchpoint_in_thread_scope): Adjust to use
inferior_thread.
(breakpoint_kind): Adjust.
(remove_breakpoints_pid): Rename to ...
(remove_breakpoints_inf): ... this. Adjust to take an inferior
pointer. All callers adjusted.
(bpstat_clear_actions): Use inferior_thread.
(get_bpstat_thread): New.
(bpstat_do_actions): Use it.
(bpstat_check_breakpoint_conditions, bpstat_stop_status): Adjust
to take a thread_info pointer. All callers adjusted.
(set_longjmp_breakpoint_for_call_dummy, set_momentary_breakpoint)
(breakpoint_re_set_thread): Use inferior_thread.
* breakpoint.h (struct inferior): Forward declare.
(bpstat_stop_status): Update.
(remove_breakpoints_pid): Delete.
(remove_breakpoints_inf): New.
* bsd-uthread.c (bsd_uthread_target::wait)
(bsd_uthread_target::update_thread_list): Use find_thread_ptid.
* btrace.c (btrace_add_pc, btrace_enable, btrace_fetch)
(maint_btrace_packet_history_cmd)
(maint_btrace_clear_packet_history_cmd): Adjust.
(maint_btrace_clear_cmd, maint_info_btrace_cmd): Adjust to use
inferior_thread.
* cli/cli-interp.c: Include "inferior.h".
* common/refcounted-object.h (struct
refcounted_object_ref_policy): New.
* compile/compile-object-load.c: Include gdbthread.h.
(store_regs): Use inferior_thread.
* corelow.c (core_target::close): Use current_inferior.
(core_target_open): Adjust to use first_thread_of_inferior and use
the current inferior.
* ctf.c (ctf_target::close): Adjust to use current_inferior.
* dummy-frame.c (dummy_frame_id) <ptid>: Delete, replaced by ...
<thread>: ... this new field. All references adjusted.
(dummy_frame_pop, dummy_frame_discard, register_dummy_frame_dtor):
Take a thread_info pointer instead of a ptid_t.
* dummy-frame.h (dummy_frame_push, dummy_frame_pop)
(dummy_frame_discard, register_dummy_frame_dtor): Take a
thread_info pointer instead of a ptid_t.
* elfread.c: Include "inferior.h".
(elf_gnu_ifunc_resolver_stop, elf_gnu_ifunc_resolver_return_stop):
Use inferior_thread.
* eval.c (evaluate_subexp): Likewise.
* frame.c (frame_pop, has_stack_frames, find_frame_sal): Use
inferior_thread.
* gdb_proc_service.h (struct thread_info): Forward declare.
(struct ps_prochandle) <ptid>: Delete, replaced by ...
<thread>: ... this new field. All references adjusted.
* gdbarch.h, gdbarch.c: Regenerate.
* gdbarch.sh (get_syscall_number): Replace 'ptid' parameter with a
'thread' parameter. All implementations and callers adjusted.
* gdbthread.h (thread_info) <set_running>: New method.
(delete_thread, delete_thread_silent): Take a thread_info pointer
instead of a ptid.
(global_thread_id_to_ptid, ptid_to_global_thread_id): Delete.
(first_thread_of_process): Delete, replaced by ...
(first_thread_of_inferior): ... this new function. All callers
adjusted.
(any_live_thread_of_process): Delete, replaced by ...
(any_live_thread_of_inferior): ... this new function. All callers
adjusted.
(switch_to_thread, switch_to_no_thread): Declare.
(is_executing): Delete.
(enable_thread_stack_temporaries): Update comment.
<enable_thread_stack_temporaries>: Take a thread_info pointer
instead of a ptid_t. Incref the thread.
<~enable_thread_stack_temporaries>: Decref the thread.
<m_ptid>: Delete
<m_thr>: New.
(thread_stack_temporaries_enabled_p, push_thread_stack_temporary)
(get_last_thread_stack_temporary)
(value_in_thread_stack_temporaries, can_access_registers_thread):
Take a thread_info pointer instead of a ptid_t. All callers
adjusted.
* infcall.c (get_call_return_value): Use inferior_thread.
(run_inferior_call): Work with thread pointers instead of ptid_t.
(call_function_by_hand_dummy): Work with thread pointers instead
of ptid_t. Use thread_info_ref.
* infcmd.c (proceed_thread_callback): Access thread's state
directly.
(ensure_valid_thread, ensure_not_running): Use inferior_thread,
access thread's state directly.
(continue_command): Use inferior_thread.
(info_program_command): Use find_thread_ptid and access thread
state directly.
(proceed_after_attach_callback): Use thread state directly.
(notice_new_inferior): Take a thread_info pointer instead of a
ptid_t. All callers adjusted.
(exit_inferior): Take an inferior pointer instead of a pid. All
callers adjusted.
(exit_inferior_silent): New.
(detach_inferior): Delete.
(valid_gdb_inferior_id, pid_to_gdb_inferior_id)
(gdb_inferior_id_to_pid, in_inferior_list): Delete.
(detach_inferior_command, kill_inferior_command): Use
find_inferior_id instead of valid_gdb_inferior_id and
gdb_inferior_id_to_pid.
(inferior_command): Use inferior and thread pointers.
* inferior.h (struct thread_info): Forward declare.
(notice_new_inferior): Take a thread_info pointer instead of a
ptid_t. All callers adjusted.
(detach_inferior): Delete declaration.
(exit_inferior, exit_inferior_silent): Take an inferior pointer
instead of a pid. All callers adjusted.
(gdb_inferior_id_to_pid, pid_to_gdb_inferior_id, in_inferior_list)
(valid_gdb_inferior_id): Delete.
* infrun.c (follow_fork_inferior, proceed_after_vfork_done)
(handle_vfork_child_exec_or_exit, follow_exec): Adjust.
(struct displaced_step_inferior_state) <pid>: Delete, replaced by
...
<inf>: ... this new field.
<step_ptid>: Delete, replaced by ...
<step_thread>: ... this new field.
(get_displaced_stepping_state): Take an inferior pointer instead
of a pid. All callers adjusted.
(displaced_step_in_progress_any_inferior): Adjust.
(displaced_step_in_progress_thread): Take a thread pointer instead
of a ptid_t. All callers adjusted.
(displaced_step_in_progress, add_displaced_stepping_state): Take
an inferior pointer instead of a pid. All callers adjusted.
(get_displaced_step_closure_by_addr): Adjust.
(remove_displaced_stepping_state): Take an inferior pointer
instead of a pid. All callers adjusted.
(displaced_step_prepare_throw, displaced_step_prepare)
(displaced_step_fixup): Take a thread pointer instead of a ptid_t.
All callers adjusted.
(start_step_over): Adjust.
(infrun_thread_ptid_changed): Remove bit updating ptids in the
displaced step queue.
(do_target_resume): Adjust.
(fetch_inferior_event): Use inferior_thread.
(context_switch, get_inferior_stop_soon): Take an
execution_control_state pointer instead of a ptid_t. All callers
adjusted.
(switch_to_thread_cleanup): Delete.
(stop_all_threads): Use scoped_restore_current_thread.
* inline-frame.c: Include "gdbthread.h".
(inline_state) <inline_state>: Take a thread pointer instead of a
ptid_t. All callers adjusted.
<ptid>: Delete, replaced by ...
<thread>: ... this new field.
(find_inline_frame_state): Take a thread pointer instead of a
ptid_t. All callers adjusted.
(skip_inline_frames, step_into_inline_frame)
(inline_skipped_frames, inline_skipped_symbol): Take a thread
pointer instead of a ptid_t. All callers adjusted.
* inline-frame.h (skip_inline_frames, step_into_inline_frame)
(inline_skipped_frames, inline_skipped_symbol): Likewise.
* linux-fork.c (delete_checkpoint_command): Adjust to use thread
pointers directly.
* linux-nat.c (get_detach_signal): Likewise.
* linux-thread-db.c (thread_from_lwp): New 'stopped' parameter.
(thread_db_notice_clone): Adjust.
(thread_db_find_new_threads_silently)
(thread_db_find_new_threads_2, thread_db_find_new_threads_1): Take
a thread pointer instead of a ptid_t. All callers adjusted.
* mi/mi-cmd-var.c: Include "inferior.h".
(mi_cmd_var_update_iter): Update to use thread pointers.
* mi/mi-interp.c (mi_new_thread): Update to use the thread's
inferior directly.
(mi_output_running_pid, mi_inferior_count): Delete, bits factored
out to ...
(mi_output_running): ... this new function.
(mi_on_resume_1): Adjust to use it.
(mi_user_selected_context_changed): Adjust to use inferior_thread.
* mi/mi-main.c (proceed_thread): Adjust to use thread pointers
directly.
  regcache = get_thread_regcache (tp);
  pc = regcache_read_pc (regcache);

  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = new std::vector<btrace_block>;

  btrace.variant.bts.blocks->emplace_back (pc, pc);

  btrace_compute_ftrace (tp, &btrace, NULL);
}
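
/* A btrace_block whose begin and end addresses are equal effectively covers
   a single instruction, so the (PC, PC) block added above seeds the trace
   with just the instruction at which recording was enabled; btrace_enable
   relies on this for the non-PT formats.  */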

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    error (_("Recording already enabled on thread %s (%s)."),
           print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("Intel Processor Trace support was disabled at compile time."));
#endif /* !defined (HAVE_LIBIPT) */
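
  /* HAVE_LIBIPT corresponds to GDB having been configured against libipt,
     the Intel Processor Trace decoder library; without it, the PT format is
     rejected by the check above and only BTS recording remains reachable
     here.  */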

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str ());

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  if (tp->btrace.target == NULL)
    error (_("Failed to enable recording on thread %s (%s)."),
           print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

  /* We need to undo the enable in case of errors.  */
  try
    {
      /* Add an entry for the current PC so we start tracing from where we
         enabled it.

         If we can't access TP's registers, TP is most likely running.  In
         this case, we can't really say where tracing was enabled so it
         should be safe to simply skip this step.

         This is not relevant for BTRACE_FORMAT_PT since the trace will
         already start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
          && can_access_registers_thread (tp))
        btrace_add_pc (tp);
    }
  catch (const gdb_exception &exception)
    {
      btrace_disable (tp);

      throw;
    }
}
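
/* Illustrative sketch only (not taken from the surrounding code): a record
   target would typically enable branch tracing per thread and let the
   errors above reject double-enabling or unsupported formats.  The
   iteration helper and configuration setup are assumptions made for the
   example.

     struct btrace_config conf {};
     conf.format = BTRACE_FORMAT_BTS;

     for (thread_info *tp : all_non_exited_threads ())
       {
         try
           {
             btrace_enable (tp, &conf);
           }
         catch (const gdb_exception_error &e)
           {
             // E.g. recording was already enabled or PT support was not
             // built in; btrace_enable has already undone a partial enable.
           }
       }
*/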

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}
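
/* The configuration returned above is whatever the target layer reports as
   currently in effect, which may differ from the configuration that was
   requested when recording was enabled (for instance, a trace buffer size
   adjusted by the target).  */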

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    error (_("Recording not enabled on thread %s (%s)."),
           print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str ());

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}
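
/* Illustrative sketch only: callers that turn off recording for a whole
   inferior would typically skip threads that never had recording enabled,
   since btrace_disable errors out for those.  The iteration helper is an
   assumption made for the example.

     for (thread_info *tp : all_non_exited_threads ())
       if (tp->btrace.target != NULL)
         btrace_disable (tp);
*/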

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str ());

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}
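
/* Unlike btrace_disable, btrace_teardown returns silently when recording is
   not enabled; it appears intended for teardown paths where the thread or
   the target is going away and raising an error would not help the user.  */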

/* Stitch branch trace in BTS format.  */
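
/* Rough summary (hedged, inferred from the name and parameters): when branch
   trace is fetched incrementally, the newly read BTS blocks have to be
   stitched onto the trace already decoded for this thread so the point at
   which the previous fetch ended is not duplicated.  This helper handles the
   BTS case.  */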
static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
|
|
|
struct btrace_thread_info *btinfo;
|
2013-06-03 21:39:35 +08:00
|
|
|
struct btrace_function *last_bfun;
|
2019-09-16 21:12:27 +08:00
|
|
|
btrace_block *first_new_block;
|
2013-06-03 21:39:35 +08:00
|
|
|
|
record-btrace: indicate gaps
Indicate gaps in the trace due to decode errors. Internally, a gap is
represented as a btrace function segment without instructions and with a
non-zero format-specific error code.
Show the gap when traversing the instruction or function call history.
Also indicate gaps in "info record".
It looks like this:
(gdb) info record
Active record target: record-btrace
Recording format: Branch Trace Store.
Buffer size: 64KB.
Recorded 32 instructions in 5 functions (1 gaps) for thread 1 (process 7182).
(gdb) record function-call-history /cli
1 fib inst 1,9 at src/fib.c:9,14
2 fib inst 10,20 at src/fib.c:6,14
3 [decode error (1): instruction overflow]
4 fib inst 21,28 at src/fib.c:11,14
5 fib inst 29,33 at src/fib.c:6,9
(gdb) record instruction-history 20,22
20 0x000000000040062f <fib+47>: sub $0x1,%rax
[decode error (1): instruction overflow]
21 0x0000000000400613 <fib+19>: add $0x1,%rax
22 0x0000000000400617 <fib+23>: mov %rax,0x200a3a(%rip)
(gdb)
Gaps are ignored during reverse execution and replay.
2015-02-09 Markus Metzger <markus.t.metzger@intel.com>
* btrace.c (ftrace_find_call): Skip gaps.
(ftrace_new_function): Initialize level.
(ftrace_new_call, ftrace_new_tailcall, ftrace_new_return)
(ftrace_new_switch): Update
level computation.
(ftrace_new_gap): New.
(ftrace_update_function): Create new function after gap.
(btrace_compute_ftrace_bts): Create gap on error.
(btrace_stitch_bts): Update parameters. Clear trace if it
becomes empty.
(btrace_stitch_trace): Update parameters. Update callers.
(btrace_clear): Reset the number of gaps.
(btrace_insn_get): Return NULL if the iterator points to a gap.
(btrace_insn_number): Return zero if the iterator points to a gap.
(btrace_insn_end): Allow gaps at the end.
(btrace_insn_next, btrace_insn_prev, btrace_insn_cmp): Handle gaps.
(btrace_find_insn_by_number): Assert that the found iterator does
not point to a gap.
(btrace_call_next, btrace_call_prev): Assert that the last function
is not a gap.
* btrace.h (btrace_bts_error): New.
(btrace_function): Update comment.
(btrace_function) <insn, insn_offset, number>: Update comment.
(btrace_function) <errcode>: New.
(btrace_thread_info) <ngaps>: New.
(btrace_thread_info) <replay>: Update comment.
(btrace_insn_get): Update comment.
* record-btrace.c (btrace_ui_out_decode_error): New.
(record_btrace_info): Print number of gaps.
(btrace_insn_history, btrace_call_history): Call
btrace_ui_out_decode_error for gaps.
(record_btrace_step_thread, record_btrace_start_replaying): Skip gaps.
testsuite/
* gdb.btrace/buffer-size.exp: Update "info record" output.
* gdb.btrace/delta.exp: Update "info record" output.
* gdb.btrace/enable.exp: Update "info record" output.
* gdb.btrace/finish.exp: Update "info record" output.
* gdb.btrace/instruction_history.exp: Update "info record" output.
* gdb.btrace/next.exp: Update "info record" output.
* gdb.btrace/nexti.exp: Update "info record" output.
* gdb.btrace/step.exp: Update "info record" output.
* gdb.btrace/stepi.exp: Update "info record" output.
* gdb.btrace/nohist.exp: Update "info record" output.
2014-01-30 16:51:10 +08:00
|
|
|
btinfo = &tp->btrace;
|
2017-05-30 18:47:37 +08:00
|
|
|
gdb_assert (!btinfo->functions.empty ());
|
2019-09-16 21:12:27 +08:00
|
|
|
gdb_assert (!btrace->blocks->empty ());
|
record-btrace: indicate gaps
Indicate gaps in the trace due to decode errors. Internally, a gap is
represented as a btrace function segment without instructions and with a
non-zero format-specific error code.
Show the gap when traversing the instruction or function call history.
Also indicate gaps in "info record".
It looks like this:
(gdb) info record
Active record target: record-btrace
Recording format: Branch Trace Store.
Buffer size: 64KB.
Recorded 32 instructions in 5 functions (1 gaps) for thread 1 (process 7182).
(gdb) record function-call-history /cli
1 fib inst 1,9 at src/fib.c:9,14
2 fib inst 10,20 at src/fib.c:6,14
3 [decode error (1): instruction overflow]
4 fib inst 21,28 at src/fib.c:11,14
5 fib inst 29,33 at src/fib.c:6,9
(gdb) record instruction-history 20,22
20 0x000000000040062f <fib+47>: sub $0x1,%rax
[decode error (1): instruction overflow]
21 0x0000000000400613 <fib+19>: add $0x1,%rax
22 0x0000000000400617 <fib+23>: mov %rax,0x200a3a(%rip)
(gdb)
Gaps are ignored during reverse execution and replay.
2015-02-09 Markus Metzger <markus.t.metzger@intel.com>
* btrace.c (ftrace_find_call): Skip gaps.
(ftrace_new_function): Initialize level.
(ftrace_new_call, ftrace_new_tailcall, ftrace_new_return)
(ftrace_new_switch): Update
level computation.
(ftrace_new_gap): New.
(ftrace_update_function): Create new function after gap.
(btrace_compute_ftrace_bts): Create gap on error.
(btrace_stitch_bts): Update parameters. Clear trace if it
becomes empty.
(btrace_stitch_trace): Update parameters. Update callers.
(btrace_clear): Reset the number of gaps.
(btrace_insn_get): Return NULL if the iterator points to a gap.
(btrace_insn_number): Return zero if the iterator points to a gap.
(btrace_insn_end): Allow gaps at the end.
(btrace_insn_next, btrace_insn_prev, btrace_insn_cmp): Handle gaps.
(btrace_find_insn_by_number): Assert that the found iterator does
not point to a gap.
(btrace_call_next, btrace_call_prev): Assert that the last function
is not a gap.
* btrace.h (btrace_bts_error): New.
(btrace_function): Update comment.
(btrace_function) <insn, insn_offset, number>: Update comment.
(btrace_function) <errcode>: New.
(btrace_thread_info) <ngaps>: New.
(btrace_thread_info) <replay>: Update comment.
(btrace_insn_get): Update comment.
* record-btrace.c (btrace_ui_out_decode_error): New.
(record_btrace_info): Print number of gaps.
(btrace_insn_history, btrace_call_history): Call
btrace_ui_out_decode_error for gaps.
(record_btrace_step_thread, record_btrace_start_replaying): Skip gaps.
testsuite/
* gdb.btrace/buffer-size.exp: Update "info record" output.
* gdb.btrace/delta.exp: Update "info record" output.
* gdb.btrace/enable.exp: Update "info record" output.
* gdb.btrace/finish.exp: Update "info record" output.
* gdb.btrace/instruction_history.exp: Update "info record" output.
* gdb.btrace/next.exp: Update "info record" output.
* gdb.btrace/nexti.exp: Update "info record" output.
* gdb.btrace/step.exp: Update "info record" output.
* gdb.btrace/stepi.exp: Update "info record" output.
* gdb.btrace/nohist.exp: Update "info record" output.
2014-01-30 16:51:10 +08:00
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
last_bfun = &btinfo->functions.back ();
|
2017-05-30 18:47:37 +08:00
|
|
|
|
record-btrace: indicate gaps
Indicate gaps in the trace due to decode errors. Internally, a gap is
represented as a btrace function segment without instructions and with a
non-zero format-specific error code.
Show the gap when traversing the instruction or function call history.
Also indicate gaps in "info record".
It looks like this:
(gdb) info record
Active record target: record-btrace
Recording format: Branch Trace Store.
Buffer size: 64KB.
Recorded 32 instructions in 5 functions (1 gaps) for thread 1 (process 7182).
(gdb) record function-call-history /cli
1 fib inst 1,9 at src/fib.c:9,14
2 fib inst 10,20 at src/fib.c:6,14
3 [decode error (1): instruction overflow]
4 fib inst 21,28 at src/fib.c:11,14
5 fib inst 29,33 at src/fib.c:6,9
(gdb) record instruction-history 20,22
20 0x000000000040062f <fib+47>: sub $0x1,%rax
[decode error (1): instruction overflow]
21 0x0000000000400613 <fib+19>: add $0x1,%rax
22 0x0000000000400617 <fib+23>: mov %rax,0x200a3a(%rip)
(gdb)
Gaps are ignored during reverse execution and replay.
2015-02-09 Markus Metzger <markus.t.metzger@intel.com>
* btrace.c (ftrace_find_call): Skip gaps.
(ftrace_new_function): Initialize level.
(ftrace_new_call, ftrace_new_tailcall, ftrace_new_return)
(ftrace_new_switch): Update
level computation.
(ftrace_new_gap): New.
(ftrace_update_function): Create new function after gap.
(btrace_compute_ftrace_bts): Create gap on error.
(btrace_stitch_bts): Update parameters. Clear trace if it
becomes empty.
(btrace_stitch_trace): Update parameters. Update callers.
(btrace_clear): Reset the number of gaps.
(btrace_insn_get): Return NULL if the iterator points to a gap.
(btrace_insn_number): Return zero if the iterator points to a gap.
(btrace_insn_end): Allow gaps at the end.
(btrace_insn_next, btrace_insn_prev, btrace_insn_cmp): Handle gaps.
(btrace_find_insn_by_number): Assert that the found iterator does
not point to a gap.
(btrace_call_next, btrace_call_prev): Assert that the last function
is not a gap.
* btrace.h (btrace_bts_error): New.
(btrace_function): Update comment.
(btrace_function) <insn, insn_offset, number>: Update comment.
(btrace_function) <errcode>: New.
(btrace_thread_info) <ngaps>: New.
(btrace_thread_info) <replay>: Update comment.
(btrace_insn_get): Update comment.
* record-btrace.c (btrace_ui_out_decode_error): New.
(record_btrace_info): Print number of gaps.
(btrace_insn_history, btrace_call_history): Call
btrace_ui_out_decode_error for gaps.
(record_btrace_step_thread, record_btrace_start_replaying): Skip gaps.
testsuite/
* gdb.btrace/buffer-size.exp: Update "info record" output.
* gdb.btrace/delta.exp: Update "info record" output.
* gdb.btrace/enable.exp: Update "info record" output.
* gdb.btrace/finish.exp: Update "info record" output.
* gdb.btrace/instruction_history.exp: Update "info record" output.
* gdb.btrace/next.exp: Update "info record" output.
* gdb.btrace/nexti.exp: Update "info record" output.
* gdb.btrace/step.exp: Update "info record" output.
* gdb.btrace/stepi.exp: Update "info record" output.
* gdb.btrace/nohist.exp: Update "info record" output.
2014-01-30 16:51:10 +08:00
|
|
|
/* If the existing trace ends with a gap, we just glue the traces
|
|
|
|
together. We need to drop the last (i.e. chronologically first) block
|
|
|
|
of the new trace, though, since we can't fill in the start address.*/
|
btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
2017-09-04 16:46:36 +08:00
|
|
|
if (last_bfun->insn.empty ())
|
record-btrace: indicate gaps
Indicate gaps in the trace due to decode errors. Internally, a gap is
represented as a btrace function segment without instructions and with a
non-zero format-specific error code.
Show the gap when traversing the instruction or function call history.
Also indicate gaps in "info record".
It looks like this:
(gdb) info record
Active record target: record-btrace
Recording format: Branch Trace Store.
Buffer size: 64KB.
Recorded 32 instructions in 5 functions (1 gaps) for thread 1 (process 7182).
(gdb) record function-call-history /cli
1 fib inst 1,9 at src/fib.c:9,14
2 fib inst 10,20 at src/fib.c:6,14
3 [decode error (1): instruction overflow]
4 fib inst 21,28 at src/fib.c:11,14
5 fib inst 29,33 at src/fib.c:6,9
(gdb) record instruction-history 20,22
20 0x000000000040062f <fib+47>: sub $0x1,%rax
[decode error (1): instruction overflow]
21 0x0000000000400613 <fib+19>: add $0x1,%rax
22 0x0000000000400617 <fib+23>: mov %rax,0x200a3a(%rip)
(gdb)
Gaps are ignored during reverse execution and replay.
2015-02-09 Markus Metzger <markus.t.metzger@intel.com>
* btrace.c (ftrace_find_call): Skip gaps.
(ftrace_new_function): Initialize level.
(ftrace_new_call, ftrace_new_tailcall, ftrace_new_return)
(ftrace_new_switch): Update
level computation.
(ftrace_new_gap): New.
(ftrace_update_function): Create new function after gap.
(btrace_compute_ftrace_bts): Create gap on error.
(btrace_stitch_bts): Update parameters. Clear trace if it
becomes empty.
(btrace_stitch_trace): Update parameters. Update callers.
(btrace_clear): Reset the number of gaps.
(btrace_insn_get): Return NULL if the iterator points to a gap.
(btrace_insn_number): Return zero if the iterator points to a gap.
(btrace_insn_end): Allow gaps at the end.
(btrace_insn_next, btrace_insn_prev, btrace_insn_cmp): Handle gaps.
(btrace_find_insn_by_number): Assert that the found iterator does
not point to a gap.
(btrace_call_next, btrace_call_prev): Assert that the last function
is not a gap.
* btrace.h (btrace_bts_error): New.
(btrace_function): Update comment.
(btrace_function) <insn, insn_offset, number>: Update comment.
(btrace_function) <errcode>: New.
(btrace_thread_info) <ngaps>: New.
(btrace_thread_info) <replay>: Update comment.
(btrace_insn_get): Update comment.
* record-btrace.c (btrace_ui_out_decode_error): New.
(record_btrace_info): Print number of gaps.
(btrace_insn_history, btrace_call_history): Call
btrace_ui_out_decode_error for gaps.
(record_btrace_step_thread, record_btrace_start_replaying): Skip gaps.
testsuite/
* gdb.btrace/buffer-size.exp: Update "info record" output.
* gdb.btrace/delta.exp: Update "info record" output.
* gdb.btrace/enable.exp: Update "info record" output.
* gdb.btrace/finish.exp: Update "info record" output.
* gdb.btrace/instruction_history.exp: Update "info record" output.
* gdb.btrace/next.exp: Update "info record" output.
* gdb.btrace/nexti.exp: Update "info record" output.
* gdb.btrace/step.exp: Update "info record" output.
* gdb.btrace/stepi.exp: Update "info record" output.
* gdb.btrace/nohist.exp: Update "info record" output.
2014-01-30 16:51:10 +08:00
|
|
|
{
|
2019-09-16 21:12:27 +08:00
|
|
|
btrace->blocks->pop_back ();
|
record-btrace: indicate gaps
Indicate gaps in the trace due to decode errors. Internally, a gap is
represented as a btrace function segment without instructions and with a
non-zero format-specific error code.
Show the gap when traversing the instruction or function call history.
Also indicate gaps in "info record".
It looks like this:
(gdb) info record
Active record target: record-btrace
Recording format: Branch Trace Store.
Buffer size: 64KB.
Recorded 32 instructions in 5 functions (1 gaps) for thread 1 (process 7182).
(gdb) record function-call-history /cli
1 fib inst 1,9 at src/fib.c:9,14
2 fib inst 10,20 at src/fib.c:6,14
3 [decode error (1): instruction overflow]
4 fib inst 21,28 at src/fib.c:11,14
5 fib inst 29,33 at src/fib.c:6,9
(gdb) record instruction-history 20,22
20 0x000000000040062f <fib+47>: sub $0x1,%rax
[decode error (1): instruction overflow]
21 0x0000000000400613 <fib+19>: add $0x1,%rax
22 0x0000000000400617 <fib+23>: mov %rax,0x200a3a(%rip)
(gdb)
Gaps are ignored during reverse execution and replay.
2015-02-09 Markus Metzger <markus.t.metzger@intel.com>
* btrace.c (ftrace_find_call): Skip gaps.
(ftrace_new_function): Initialize level.
(ftrace_new_call, ftrace_new_tailcall, ftrace_new_return)
(ftrace_new_switch): Update
level computation.
(ftrace_new_gap): New.
(ftrace_update_function): Create new function after gap.
(btrace_compute_ftrace_bts): Create gap on error.
(btrace_stitch_bts): Update parameters. Clear trace if it
becomes empty.
(btrace_stitch_trace): Update parameters. Update callers.
(btrace_clear): Reset the number of gaps.
(btrace_insn_get): Return NULL if the iterator points to a gap.
(btrace_insn_number): Return zero if the iterator points to a gap.
(btrace_insn_end): Allow gaps at the end.
(btrace_insn_next, btrace_insn_prev, btrace_insn_cmp): Handle gaps.
(btrace_find_insn_by_number): Assert that the found iterator does
not point to a gap.
(btrace_call_next, btrace_call_prev): Assert that the last function
is not a gap.
* btrace.h (btrace_bts_error): New.
(btrace_function): Update comment.
(btrace_function) <insn, insn_offset, number>: Update comment.
(btrace_function) <errcode>: New.
(btrace_thread_info) <ngaps>: New.
(btrace_thread_info) <replay>: Update comment.
(btrace_insn_get): Update comment.
* record-btrace.c (btrace_ui_out_decode_error): New.
(record_btrace_info): Print number of gaps.
(btrace_insn_history, btrace_call_history): Call
btrace_ui_out_decode_error for gaps.
(record_btrace_step_thread, record_btrace_start_replaying): Skip gaps.
testsuite/
* gdb.btrace/buffer-size.exp: Update "info record" output.
* gdb.btrace/delta.exp: Update "info record" output.
* gdb.btrace/enable.exp: Update "info record" output.
* gdb.btrace/finish.exp: Update "info record" output.
* gdb.btrace/instruction_history.exp: Update "info record" output.
* gdb.btrace/next.exp: Update "info record" output.
* gdb.btrace/nexti.exp: Update "info record" output.
* gdb.btrace/step.exp: Update "info record" output.
* gdb.btrace/stepi.exp: Update "info record" output.
* gdb.btrace/nohist.exp: Update "info record" output.
2014-01-30 16:51:10 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2013-06-03 21:39:35 +08:00
|
|
|
|
|
|
|
/* Beware that block trace starts with the most recent block, so the
|
|
|
|
chronologically first block in the new trace is the last block in
|
|
|
|
the new trace's block vector. */
|
2019-09-16 21:12:27 +08:00
|
|
|
first_new_block = &btrace->blocks->back ();
|
btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
2017-09-04 16:46:36 +08:00
|
|
|
const btrace_insn &last_insn = last_bfun->insn.back ();
|
2013-06-03 21:39:35 +08:00
|
|
|
|
|
|
|
/* If the current PC at the end of the block is the same as in our current
|
|
|
|
trace, there are two explanations:
|
|
|
|
1. we executed the instruction and some branch brought us back.
|
|
|
|
2. we have not made any progress.
|
|
|
|
In the first case, the delta trace vector should contain at least two
|
|
|
|
entries.
|
|
|
|
In the second case, the delta trace vector should contain exactly one
|
|
|
|
entry for the partial block containing the current PC. Remove it. */
|
2019-09-16 21:12:27 +08:00
|
|
|
if (first_new_block->end == last_insn.pc && btrace->blocks->size () == 1)
|
2013-06-03 21:39:35 +08:00
|
|
|
{
|
2019-09-16 21:12:27 +08:00
|
|
|
btrace->blocks->pop_back ();
|
2013-06-03 21:39:35 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
2017-09-04 16:46:36 +08:00
|
|
|
DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
|
2013-06-03 21:39:35 +08:00
|
|
|
core_addr_to_string_nz (first_new_block->end));
|
|
|
|
|
|
|
|
/* Do a simple sanity check to make sure we don't accidentally end up
|
|
|
|
with a bad block. This should not occur in practice. */
|
btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
2017-09-04 16:46:36 +08:00
|
|
|
if (first_new_block->end < last_insn.pc)
|
2013-06-03 21:39:35 +08:00
|
|
|
{
|
|
|
|
warning (_("Error while trying to read delta trace. Falling back to "
|
|
|
|
"a full read."));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We adjust the last block to start at the end of our current trace. */
|
|
|
|
gdb_assert (first_new_block->begin == 0);
|
btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
2017-09-04 16:46:36 +08:00
|
|
|
first_new_block->begin = last_insn.pc;
|
2013-06-03 21:39:35 +08:00
|
|
|
|
|
|
|
/* We simply pop the last insn so we can insert it again as part of
|
|
|
|
the normal branch trace computation.
|
|
|
|
Since instruction iterators are based on indices in the instructions
|
|
|
|
vector, we don't leave any pointers dangling. */
|
|
|
|
DEBUG ("pruning insn at %s for stitching",
|
btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
2017-09-04 16:46:36 +08:00
|
|
|
ftrace_print_insn_addr (&last_insn));
|
2013-06-03 21:39:35 +08:00
|
|
|
|
btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
2017-09-04 16:46:36 +08:00
|
|
|
last_bfun->insn.pop_back ();
|
2013-06-03 21:39:35 +08:00
|
|
|
|
|
|
|
/* The instructions vector may become empty temporarily if this has
|
|
|
|
been the only instruction in this function segment.
|
|
|
|
This violates the invariant but will be remedied shortly by
|
|
|
|
btrace_compute_ftrace when we add the new trace. */
|
record-btrace: indicate gaps
Indicate gaps in the trace due to decode errors. Internally, a gap is
represented as a btrace function segment without instructions and with a
non-zero format-specific error code.
Show the gap when traversing the instruction or function call history.
Also indicate gaps in "info record".
It looks like this:
(gdb) info record
Active record target: record-btrace
Recording format: Branch Trace Store.
Buffer size: 64KB.
Recorded 32 instructions in 5 functions (1 gaps) for thread 1 (process 7182).
(gdb) record function-call-history /cli
1 fib inst 1,9 at src/fib.c:9,14
2 fib inst 10,20 at src/fib.c:6,14
3 [decode error (1): instruction overflow]
4 fib inst 21,28 at src/fib.c:11,14
5 fib inst 29,33 at src/fib.c:6,9
(gdb) record instruction-history 20,22
20 0x000000000040062f <fib+47>: sub $0x1,%rax
[decode error (1): instruction overflow]
21 0x0000000000400613 <fib+19>: add $0x1,%rax
22 0x0000000000400617 <fib+23>: mov %rax,0x200a3a(%rip)
(gdb)
Gaps are ignored during reverse execution and replay.
2015-02-09 Markus Metzger <markus.t.metzger@intel.com>
* btrace.c (ftrace_find_call): Skip gaps.
(ftrace_new_function): Initialize level.
(ftrace_new_call, ftrace_new_tailcall, ftrace_new_return)
(ftrace_new_switch): Update
level computation.
(ftrace_new_gap): New.
(ftrace_update_function): Create new function after gap.
(btrace_compute_ftrace_bts): Create gap on error.
(btrace_stitch_bts): Update parameters. Clear trace if it
becomes empty.
(btrace_stitch_trace): Update parameters. Update callers.
(btrace_clear): Reset the number of gaps.
(btrace_insn_get): Return NULL if the iterator points to a gap.
(btrace_insn_number): Return zero if the iterator points to a gap.
(btrace_insn_end): Allow gaps at the end.
(btrace_insn_next, btrace_insn_prev, btrace_insn_cmp): Handle gaps.
(btrace_find_insn_by_number): Assert that the found iterator does
not point to a gap.
(btrace_call_next, btrace_call_prev): Assert that the last function
is not a gap.
* btrace.h (btrace_bts_error): New.
(btrace_function): Update comment.
(btrace_function) <insn, insn_offset, number>: Update comment.
(btrace_function) <errcode>: New.
(btrace_thread_info) <ngaps>: New.
(btrace_thread_info) <replay>: Update comment.
(btrace_insn_get): Update comment.
* record-btrace.c (btrace_ui_out_decode_error): New.
(record_btrace_info): Print number of gaps.
(btrace_insn_history, btrace_call_history): Call
btrace_ui_out_decode_error for gaps.
(record_btrace_step_thread, record_btrace_start_replaying): Skip gaps.
testsuite/
* gdb.btrace/buffer-size.exp: Update "info record" output.
* gdb.btrace/delta.exp: Update "info record" output.
* gdb.btrace/enable.exp: Update "info record" output.
* gdb.btrace/finish.exp: Update "info record" output.
* gdb.btrace/instruction_history.exp: Update "info record" output.
* gdb.btrace/next.exp: Update "info record" output.
* gdb.btrace/nexti.exp: Update "info record" output.
* gdb.btrace/step.exp: Update "info record" output.
* gdb.btrace/stepi.exp: Update "info record" output.
* gdb.btrace/nohist.exp: Update "info record" output.
2014-01-30 16:51:10 +08:00
|
|
|
|
|
|
|
/* The only case where this would hurt is if the entire trace consisted
|
|
|
|
of just that one instruction. If we remove it, we might turn the now
|
|
|
|
empty btrace function segment into a gap. But we don't want gaps at
|
|
|
|
the beginning. To avoid this, we remove the entire old trace. */
|
btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
2017-09-04 16:46:36 +08:00
|
|
|
if (last_bfun->number == 1 && last_bfun->insn.empty ())
|
record-btrace: indicate gaps
Indicate gaps in the trace due to decode errors. Internally, a gap is
represented as a btrace function segment without instructions and with a
non-zero format-specific error code.
Show the gap when traversing the instruction or function call history.
Also indicate gaps in "info record".
It looks like this:
(gdb) info record
Active record target: record-btrace
Recording format: Branch Trace Store.
Buffer size: 64KB.
Recorded 32 instructions in 5 functions (1 gaps) for thread 1 (process 7182).
(gdb) record function-call-history /cli
1 fib inst 1,9 at src/fib.c:9,14
2 fib inst 10,20 at src/fib.c:6,14
3 [decode error (1): instruction overflow]
4 fib inst 21,28 at src/fib.c:11,14
5 fib inst 29,33 at src/fib.c:6,9
(gdb) record instruction-history 20,22
20 0x000000000040062f <fib+47>: sub $0x1,%rax
[decode error (1): instruction overflow]
21 0x0000000000400613 <fib+19>: add $0x1,%rax
22 0x0000000000400617 <fib+23>: mov %rax,0x200a3a(%rip)
(gdb)
Gaps are ignored during reverse execution and replay.
2015-02-09 Markus Metzger <markus.t.metzger@intel.com>
* btrace.c (ftrace_find_call): Skip gaps.
(ftrace_new_function): Initialize level.
(ftrace_new_call, ftrace_new_tailcall, ftrace_new_return)
(ftrace_new_switch): Update
level computation.
(ftrace_new_gap): New.
(ftrace_update_function): Create new function after gap.
(btrace_compute_ftrace_bts): Create gap on error.
(btrace_stitch_bts): Update parameters. Clear trace if it
becomes empty.
(btrace_stitch_trace): Update parameters. Update callers.
(btrace_clear): Reset the number of gaps.
(btrace_insn_get): Return NULL if the iterator points to a gap.
(btrace_insn_number): Return zero if the iterator points to a gap.
(btrace_insn_end): Allow gaps at the end.
(btrace_insn_next, btrace_insn_prev, btrace_insn_cmp): Handle gaps.
(btrace_find_insn_by_number): Assert that the found iterator does
not point to a gap.
(btrace_call_next, btrace_call_prev): Assert that the last function
is not a gap.
* btrace.h (btrace_bts_error): New.
(btrace_function): Update comment.
(btrace_function) <insn, insn_offset, number>: Update comment.
(btrace_function) <errcode>: New.
(btrace_thread_info) <ngaps>: New.
(btrace_thread_info) <replay>: Update comment.
(btrace_insn_get): Update comment.
* record-btrace.c (btrace_ui_out_decode_error): New.
(record_btrace_info): Print number of gaps.
(btrace_insn_history, btrace_call_history): Call
btrace_ui_out_decode_error for gaps.
(record_btrace_step_thread, record_btrace_start_replaying): Skip gaps.
testsuite/
* gdb.btrace/buffer-size.exp: Update "info record" output.
* gdb.btrace/delta.exp: Update "info record" output.
* gdb.btrace/enable.exp: Update "info record" output.
* gdb.btrace/finish.exp: Update "info record" output.
* gdb.btrace/instruction_history.exp: Update "info record" output.
* gdb.btrace/next.exp: Update "info record" output.
* gdb.btrace/nexti.exp: Update "info record" output.
* gdb.btrace/step.exp: Update "info record" output.
* gdb.btrace/stepi.exp: Update "info record" output.
* gdb.btrace/nohist.exp: Update "info record" output.
2014-01-30 16:51:10 +08:00
|
|
|
btrace_clear (tp);
|
|
|
|
|
2013-06-03 21:39:35 +08:00
|
|
|
return 0;
|
|
|
|
}
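
/* A worked example (addresses are illustrative only): if the old trace ends
   with an instruction at PC 0x400617 and the delta's chronologically first
   block spans [0x0, 0x400630], the stitching above rewrites that block to
   [0x400617, 0x400630] and pops the 0x400617 instruction from the old tail,
   so it is decoded again as the first instruction of the new block.  */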

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace->empty ())
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
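
/* Caller sketch (assumed shape; the exact fetch calls may differ): when
   stitching fails, the delta cannot be used and the caller is expected to
   discard the existing trace and fall back to a full read, e.g.

     if (btrace_stitch_trace (&btrace, tp) != 0)
       {
         btrace_clear (tp);
         errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
       }

   target_read_btrace and BTRACE_READ_ALL are assumptions here; btrace_fetch
   below implements the real policy.  */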

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      delete btinfo->maint.variant.pt.packets;

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
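
/* Per the change that introduced the btrace maintenance commands, both
   btrace_fetch and btrace_clear call btrace_maint_clear, so stale packet
   histories are discarded whenever the trace data is refetched or dropped.  */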

/* See btrace.h.  */

const char *
btrace_decode_error (enum btrace_format format, int errcode)
{
  switch (format)
    {
    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        case BDE_BTS_OVERFLOW:
          return _("instruction overflow");

        case BDE_BTS_INSN_SIZE:
          return _("unknown instruction");

        default:
          break;
        }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
        {
        case BDE_PT_USER_QUIT:
          return _("trace decode cancelled");

        case BDE_PT_DISABLED:
          return _("disabled");

        case BDE_PT_OVERFLOW:
          return _("overflow");

        default:
          if (errcode < 0)
            return pt_errstr (pt_errcode (errcode));
          break;
        }
      break;
#endif /* defined (HAVE_LIBIPT) */

    default:
      break;
    }

  return _("unknown");
}
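
/* Usage sketch (the exact ui_out call is an assumption): the record-btrace
   target combines the numeric error code with this string when rendering a
   gap in the instruction or call history, producing lines such as

     [decode error (1): instruction overflow]

   roughly along the lines of

     const char *errstr = btrace_decode_error (format, errcode);
     uiout->message (_("[decode error (%d): %s]\n"), errcode, errstr);

   See btrace_ui_out_decode_error in record-btrace.c for the real
   formatting.  */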

/* See btrace.h. */

void
btrace_fetch (struct thread_info *tp, const struct btrace_cpu *cpu)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str ());

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected. */
  if (btinfo->replay != NULL)
    return;

  /* With CLI usage, TP is always the current thread when we get here.
     However, since we can also store a gdb.Record object in Python
     referring to a different thread than the current one, we need to
     temporarily set the current thread. */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  /* We should not be called on running or exited threads. */
  gdb_assert (can_access_registers_thread (tp));

  /* Let's first try to extend the trace we already have. */
  if (!btinfo->functions.empty ())
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success. Let's try to stitch the traces together. */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace. Let's try to read new trace. */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have. */
          if (errcode == 0 && !btrace.empty ())
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over. */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error. */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any. */
  if (!btrace.empty ())
    {
      /* Store the raw trace data. The stored data will be cleared in
         btrace_clear, so we always append the new trace. */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace, cpu);
    }
}
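
/* Editor's sketch, not part of btrace.c: a typical caller (compare
   require_btrace in record-btrace.c) fetches the trace for a thread and
   then inspects the computed function segments.  Passing nullptr for
   the cpu argument is assumed here to select the default errata
   handling; the printf is for illustration only.  */

static void
example_fetch_and_report (struct thread_info *tp)
{
  /* Reads delta/new/all trace as appropriate and recomputes the
     function-level trace; throws if the trace could not be read.  */
  btrace_fetch (tp, nullptr);

  struct btrace_thread_info *btinfo = &tp->btrace;

  printf ("%zu function segments, %u gaps\n",
          btinfo->functions.size (), btinfo->ngaps);
}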

/* See btrace.h. */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str ());

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed. */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  btinfo->functions.clear ();
record-btrace: indicate gaps
Indicate gaps in the trace due to decode errors. Internally, a gap is
represented as a btrace function segment without instructions and with a
non-zero format-specific error code.
Show the gap when traversing the instruction or function call history.
Also indicate gaps in "info record".
It looks like this:
(gdb) info record
Active record target: record-btrace
Recording format: Branch Trace Store.
Buffer size: 64KB.
Recorded 32 instructions in 5 functions (1 gaps) for thread 1 (process 7182).
(gdb) record function-call-history /cli
1 fib inst 1,9 at src/fib.c:9,14
2 fib inst 10,20 at src/fib.c:6,14
3 [decode error (1): instruction overflow]
4 fib inst 21,28 at src/fib.c:11,14
5 fib inst 29,33 at src/fib.c:6,9
(gdb) record instruction-history 20,22
20 0x000000000040062f <fib+47>: sub $0x1,%rax
[decode error (1): instruction overflow]
21 0x0000000000400613 <fib+19>: add $0x1,%rax
22 0x0000000000400617 <fib+23>: mov %rax,0x200a3a(%rip)
(gdb)
Gaps are ignored during reverse execution and replay.
2015-02-09 Markus Metzger <markus.t.metzger@intel.com>
* btrace.c (ftrace_find_call): Skip gaps.
(ftrace_new_function): Initialize level.
(ftrace_new_call, ftrace_new_tailcall, ftrace_new_return)
(ftrace_new_switch): Update level computation.
(ftrace_new_gap): New.
(ftrace_update_function): Create new function after gap.
(btrace_compute_ftrace_bts): Create gap on error.
(btrace_stitch_bts): Update parameters. Clear trace if it
becomes empty.
(btrace_stitch_trace): Update parameters. Update callers.
(btrace_clear): Reset the number of gaps.
(btrace_insn_get): Return NULL if the iterator points to a gap.
(btrace_insn_number): Return zero if the iterator points to a gap.
(btrace_insn_end): Allow gaps at the end.
(btrace_insn_next, btrace_insn_prev, btrace_insn_cmp): Handle gaps.
(btrace_find_insn_by_number): Assert that the found iterator does
not point to a gap.
(btrace_call_next, btrace_call_prev): Assert that the last function
is not a gap.
* btrace.h (btrace_bts_error): New.
(btrace_function): Update comment.
(btrace_function) <insn, insn_offset, number>: Update comment.
(btrace_function) <errcode>: New.
(btrace_thread_info) <ngaps>: New.
(btrace_thread_info) <replay>: Update comment.
(btrace_insn_get): Update comment.
* record-btrace.c (btrace_ui_out_decode_error): New.
(record_btrace_info): Print number of gaps.
(btrace_insn_history, btrace_call_history): Call
btrace_ui_out_decode_error for gaps.
(record_btrace_step_thread, record_btrace_start_replaying): Skip gaps.
testsuite/
* gdb.btrace/buffer-size.exp: Update "info record" output.
* gdb.btrace/delta.exp: Update "info record" output.
* gdb.btrace/enable.exp: Update "info record" output.
* gdb.btrace/finish.exp: Update "info record" output.
* gdb.btrace/instruction_history.exp: Update "info record" output.
* gdb.btrace/next.exp: Update "info record" output.
* gdb.btrace/nexti.exp: Update "info record" output.
* gdb.btrace/step.exp: Update "info record" output.
* gdb.btrace/stepi.exp: Update "info record" output.
* gdb.btrace/nohist.exp: Update "info record" output.
2014-01-30 16:51:10 +08:00
|
|
|
btinfo->ngaps = 0;
|
btrace: change branch trace data structure
The branch trace is represented as 3 vectors:
- a block vector
- an instruction vector
- a function vector
Each vector (except for the first) is computed from the one above.
Change this into a graph where a node represents a sequence of instructions
belonging to the same function and where we have three types of edges to connect
the function segments:
- control flow
- same function (instance)
- call stack
This allows us to navigate in the branch trace. We will need this for "record
goto" and reverse execution.
This patch introduces the data structure and computes the control flow edges.
It also introduces iterator structs to simplify iterating over the branch trace
in control-flow order.
It also fixes PR gdb/15240 since recursive calls are now handled correctly.
Fix the test that got the number of expected fib instances and also the
function numbers wrong.
The current instruction had been part of the branch trace. This will look odd
once we add support for reverse execution. Remove it. We still keep it in
the trace itself to allow extending the branch trace more easily in the future.
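As an illustration of the segment graph (a minimal, self-contained C++ sketch
with hypothetical names, not the actual btrace_function or
btrace_insn_iterator types), each segment keeps its instructions plus a
control-flow link and a call-stack link, and a small iterator walks the
instructions in control-flow order:
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>
/* Simplified stand-in for a function segment.  */
struct func_segment
{
  const char *name;
  std::vector<std::uint64_t> insns;  /* Instruction addresses.  */
  func_segment *flow_next;           /* Control-flow edge.  */
  func_segment *up;                  /* Call-stack edge (caller).  */
};
/* Walk instructions in control-flow order across segments.  */
struct insn_iterator
{
  func_segment *seg;
  std::size_t index;
  bool next ()
  {
    if (++index < seg->insns.size ())
      return true;
    if (seg->flow_next == nullptr)
      return false;
    seg = seg->flow_next;
    index = 0;
    return true;
  }
};
int
main ()
{
  func_segment callee { "fib", { 0x400613, 0x400617 }, nullptr, nullptr };
  func_segment caller { "main", { 0x4005f0 }, &callee, nullptr };
  callee.up = &caller;  /* fib was called from main.  */
  insn_iterator it { &caller, 0 };
  do
    std::printf ("%s: 0x%llx\n", it.seg->name,
                 (unsigned long long) it.seg->insns[it.index]);
  while (it.next ());
  return 0;
}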
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (struct btrace_func_link): New.
(enum btrace_function_flag): New.
(struct btrace_inst): Rename to ...
(struct btrace_insn): ...this. Update all users.
(struct btrace_func) <ibegin, iend>: Remove.
(struct btrace_func_link): New.
(struct btrace_func): Rename to ...
(struct btrace_function): ...this. Update all users.
(struct btrace_function) <segment, flow, up, insn, insn_offset>
<number, level, flags>: New.
(struct btrace_insn_iterator): Rename to ...
(struct btrace_insn_history): ...this. Update all users.
(struct btrace_insn_iterator, btrace_call_iterator): New.
(struct btrace_target_info) <btrace, itrace, ftrace>: Remove.
(struct btrace_target_info) <begin, end, level>
<insn_history, call_history>: New.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* btrace.c (btrace_init_insn_iterator)
(btrace_init_func_iterator, compute_itrace): Remove.
(ftrace_print_function_name, ftrace_print_filename)
(ftrace_skip_file): Change parameter to const.
(ftrace_init_func): Remove.
(ftrace_debug): Use new btrace_function fields.
(ftrace_function_switched): Also consider gaining and losing symbol
information.
(ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return)
(ftrace_new_switch, ftrace_find_caller, ftrace_new_function)
(ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
New.
(ftrace_new_function): Move. Remove debug print.
(ftrace_update_lines, ftrace_update_insns): New.
(ftrace_update_function): Check for call, ret, and jump.
(compute_ftrace): Renamed to ...
(btrace_compute_ftrace): ...this. Rewritten to compute call
stack.
(btrace_fetch, btrace_clear): Updated.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* record-btrace.c (require_btrace): Use new btrace thread
info fields.
(record_btrace_info, btrace_insn_history)
(record_btrace_insn_history, record_btrace_insn_history_range):
Use new btrace thread info fields and new iterator.
(btrace_func_history_src_line): Rename to ...
(btrace_call_history_src_line): ...this. Use new btrace
thread info fields.
(btrace_func_history): Rename to ...
(btrace_call_history): ...this. Use new btrace thread info
fields and new iterator.
(record_btrace_call_history, record_btrace_call_history_range):
Use new btrace thread info fields and new iterator.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected function
trace.
* gdb.btrace/instruction_history.exp: Initialize traced.
Remove traced_functions.
2013-03-22 21:32:47 +08:00
|
|
|
|
btrace: maintenance commands
Add maintenance commands that help with debugging the btrace record target.
The following new commands are added:
maint info btrace
Print information about branch tracing internals.
maint btrace packet-history
Print the raw branch tracing data.
maint btrace clear-packet-history
Discard the stored raw branch tracing data.
maint btrace clear
Discard all branch tracing data. It will be fetched and processed
anew by the next "record" command.
maint set|show btrace pt skip-pad
Set and show whether PAD packets are skipped when computing the
packet history.
gdb/
* btrace.c: Include gdbcmd.h, cli/cli-utils.h, and ctype.h.
(maint_btrace_cmdlist, maint_btrace_set_cmdlist)
(maint_btrace_show_cmdlist, maint_btrace_pt_set_cmdlist)
(maint_btrace_pt_show_cmdlist, maint_btrace_pt_skip_pad)
(btrace_maint_clear): New.
(btrace_fetch, btrace_clear): Call btrace_maint_clear.
(pt_print_packet, btrace_maint_decode_pt)
(btrace_maint_update_pt_packets, btrace_maint_update_packets)
(btrace_maint_print_packets, get_uint, get_context_size, no_chunk)
(maint_btrace_packet_history_cmd)
(maint_btrace_clear_packet_history_cmd, maint_btrace_clear_cmd)
(maint_btrace_cmd, maint_btrace_set_cmd, maint_btrace_show_cmd)
(maint_btrace_pt_set_cmd, maint_btrace_pt_show_cmd)
(maint_info_btrace_cmd, _initialize_btrace): New.
* btrace.h (btrace_pt_packet, btrace_pt_packet_s)
(btrace_maint_packet_history, btrace_maint_info): New.
(btrace_thread_info) <maint>: New.
* NEWS: Announce it.
doc/
* gdb.texinfo (Maintenance Commands): Document "maint btrace"
commands.
2014-02-03 21:35:28 +08:00
|
|
|
/* Must clear the maint data before - it depends on BTINFO->DATA. */
|
|
|
|
btrace_maint_clear (btinfo);
|
2018-06-08 05:34:36 +08:00
|
|
|
btinfo->data.clear ();
|
2013-06-03 21:39:35 +08:00
|
|
|
btrace_clear_history (btinfo);
|
2013-03-11 16:17:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* See btrace.h. */
|
|
|
|
|
|
|
|
void
|
|
|
|
btrace_free_objfile (struct objfile *objfile)
|
|
|
|
{
|
|
|
|
DEBUG ("free objfile");
|
|
|
|
|
Per-inferior thread list, thread ranges/iterators, down with ALL_THREADS, etc.
As preparation for multi-target, this patch makes each inferior have
its own thread list.
This isn't absolutely necessary for multi-target, but simplifies
things. It originally stemmed from the desire to eliminate the
init_thread_list calls sprinkled around, plus it makes it more
efficient to iterate over threads of a given inferior (no need to
always iterate over threads of all inferiors).
We still need to iterate over threads of all inferiors in a number of
places, which means we'd need to adjust the ALL_THREADS /
ALL_NON_EXITED_THREADS macros. However, naively tweaking those macros
to have an extra for loop, like:
#define ALL_THREADS(thr, inf) \
for (inf = inferior_list; inf; inf = inf->next) \
for (thr = inf->thread_list; thr; thr = thr->next)
causes problems with code that does "break" or "continue" within the
ALL_THREADS loop body. Plus, we need to declare the extra "inf" local
variable in order to pass it as a temporary variable to ALL_THREADS
(etc.).
It gets even trickier when we consider extending the macros to filter
out threads matching a ptid_t and a target. The macros become trickier
to read/write. Been there.
An alternative (which was my next attempt), is to replace the
ALL_THREADS etc. iteration style with for_each_all_threads,
for_each_non_exited_threads, etc. functions which would take a
callback as parameter, which would usually be passed a lambda.
However, I did not find that satisfactory at all, because the
resulting code ends up a little less natural / more noisy to read,
write and debug/step-through (due to the use of lambdas), and many
places that use "continue;" to skip to the next thread now need to
use "return;" instead. (I ran into hard-to-debug bugs caused by a
continue/return confusion.)
I.e., before:
ALL_NON_EXITED_THREADS (tp)
{
if (tp->not_what_I_want)
continue;
// do something
}
would turn into:
for_each_non_exited_thread ([&] (thread_info *tp)
{
if (tp->not_what_I_want)
return;
// do something
});
Lastly, the solution I settled on was to replace the ALL_THREADS /
ALL_NON_EXITED_THREADS / ALL_INFERIORS macros with (C++20-like) ranges
and iterators, such that you can instead naturally iterate over
threads/inferiors using range-for, e.g.:
// all threads, including THREAD_EXITED threads.
for (thread_info *tp : all_threads ())
{ .... }
// all non-exited threads.
for (thread_info *tp : all_non_exited_threads ())
{ .... }
// all non-exited threads of INF inferior.
for (thread_info *tp : inf->non_exited_threads ())
{ .... }
The all_non_exited_threads() function takes an optional filter ptid_t as
a parameter, which is quite convenient when we need to iterate over
threads matching that filter. See e.g., how the
set_executing/set_stop_requested/finish_thread_state etc. functions in
thread.c end up being simplified.
Most of the patch is thus about adding the infrastructure for allowing
the above. Later on when we get to actual multi-target, these
functions/ranges/iterators will gain a "target_ops *" parameter so
that e.g., we can iterate over all threads of a given target that
match a given filter ptid_t.
The only entry points users need to be aware of are the
all_threads/all_non_exited_threads etc. functions seen above. Thus,
those functions are declared in gdbthread.h/inferior.h. The actual
iterators/ranges are mainly "internals" and thus are put out of view
in the new thread-iter.h/thread-iter.c/inferior-iter.h files. That
keeps the gdbthread.h/inferior.h headers quite a bit more readable.
A common/safe-iterator.h header is added which adds a template that
can be used to build "safe" iterators, which are forward iterators
that can be used to replace the ALL_THREADS_SAFE macro and other
instances of the same idiom in future.
There's a little bit of shuffling of code between
gdbthread.h/thread.c/inferior.h in the patch. That is necessary in
order to avoid circular dependencies between the
gdbthread.h/inferior.h headers.
As for the init_thread_list calls sprinkled around, they're all
eliminated by this patch, and a new, central call is added to
inferior_appeared. Also related to that, note that a call to
init_wait_for_inferior in remote.c is eliminated.
init_wait_for_inferior is currently responsible for discarding skipped
inline frames, which had to be moved elsewhere. Given that nowadays
we always have a thread even for single-threaded processes, the
natural place is to delete a frame's inline frame info when we delete
the thread. I.e., from clear_thread_inferior_resources.
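For illustration, here is a minimal, self-contained C++ sketch (hypothetical
simplified types, not the actual thread-iter.h/inferior-iter.h code) of the
idea: an intrusive, singly-linked thread list exposed as a range so that
range-for works; the non-exited filter is shown inline for brevity, where the
real code wraps it in a filtered iterator.
#include <cstdio>
/* Simplified stand-in for GDB's thread_info.  */
struct thread_info
{
  int num;
  bool exited;
  thread_info *next;  /* Intrusive per-inferior list link.  */
};
/* Just enough of an iterator for range-for.  */
struct thread_iterator
{
  thread_info *tp;
  thread_info *operator* () const { return tp; }
  thread_iterator &operator++ () { tp = tp->next; return *this; }
  bool operator!= (const thread_iterator &other) const
  { return tp != other.tp; }
};
/* A range over one inferior's thread list.  */
struct thread_range
{
  thread_info *head;
  thread_iterator begin () const { return { head }; }
  thread_iterator end () const { return { nullptr }; }
};
int
main ()
{
  thread_info t3 { 3, false, nullptr };
  thread_info t2 { 2, true, &t3 };   /* An exited thread.  */
  thread_info t1 { 1, false, &t2 };
  for (thread_info *tp : thread_range { &t1 })
    if (!tp->exited)                 /* Filter kept inline for brevity.  */
      std::printf ("thread %d\n", tp->num);
  return 0;
}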
gdb/ChangeLog:
2018-11-22 Pedro Alves <palves@redhat.com>
* Makefile.in (COMMON_SFILES): Add thread-iter.c.
* breakpoint.c (breakpoints_should_be_inserted_now): Replace
ALL_NON_EXITED_THREADS with all_non_exited_threads.
(print_one_breakpoint_location): Replace ALL_INFERIORS with
all_inferiors.
* bsd-kvm.c: Include inferior.h.
* btrace.c (btrace_free_objfile): Replace ALL_NON_EXITED_THREADS
with all_non_exited_threads.
* common/filtered-iterator.h: New.
* common/safe-iterator.h: New.
* corelow.c (core_target_open): Don't call init_thread_list here.
* darwin-nat.c (thread_info_from_private_thread_info): Replace
ALL_THREADS with all_threads.
* fbsd-nat.c (fbsd_nat_target::resume): Replace
ALL_NON_EXITED_THREADS with inf->non_exited_threads.
* fbsd-tdep.c (fbsd_make_corefile_notes): Replace
ALL_NON_EXITED_THREADS with inf->non_exited_threads.
* fork-child.c (postfork_hook): Don't call init_thread_list here.
* gdbarch-selftests.c (register_to_value_test): Adjust.
* gdbthread.h: Don't include "inferior.h" here.
(struct inferior): Forward declare.
(enum step_over_calls_kind): Moved here from inferior.h.
(thread_info::deletable): Definition moved to thread.c.
(find_thread_ptid (inferior *, ptid_t)): Declare.
(ALL_THREADS, ALL_THREADS_BY_INFERIOR, ALL_THREADS_SAFE): Delete.
Include "thread-iter.h".
(all_threads, all_non_exited_threads, all_threads_safe): New.
(any_thread_p): Declare.
(thread_list): Delete.
* infcmd.c (signal_command): Replace ALL_NON_EXITED_THREADS with
all_non_exited_threads.
(proceed_after_attach_callback): Delete.
(proceed_after_attach): Take an inferior pointer instead of an
integer PID. Adjust to use range-for.
(attach_post_wait): Pass down inferior pointer instead of pid.
Use range-for instead of ALL_NON_EXITED_THREADS.
(detach_command): Remove init_thread_list call.
* inferior-iter.h: New.
* inferior.c (struct delete_thread_of_inferior_arg): Delete.
(delete_thread_of_inferior): Delete.
(delete_inferior, exit_inferior_1): Use range-for with
inf->threads_safe() instead of iterate_over_threads.
(inferior_appeared): Call init_thread_list here.
(discard_all_inferiors): Use all_non_exited_inferiors.
(find_inferior_id, find_inferior_pid): Use all_inferiors.
(iterate_over_inferiors): Use all_inferiors_safe.
(have_inferiors, number_of_live_inferiors): Use
all_non_exited_inferiors.
(number_of_inferiors): Use all_inferiors and std::distance.
(print_inferior): Use all_inferiors.
* inferior.h: Include gdbthread.h.
(enum step_over_calls_kind): Moved to gdbthread.h.
(struct inferior) <thread_list>: New field.
<threads, non_exited_threads, threads_safe>: New methods.
(ALL_INFERIORS): Delete.
Include "inferior-iter.h".
(ALL_NON_EXITED_INFERIORS): Delete.
(all_inferiors_safe, all_inferiors, all_non_exited_inferiors): New
functions.
* inflow.c (child_interrupt, child_pass_ctrlc): Replace
ALL_NON_EXITED_THREADS with all_non_exited_threads.
* infrun.c (follow_exec): Use all_threads_safe.
(clear_proceed_status, proceed): Use all_non_exited_threads.
(init_wait_for_inferior): Don't clear inline frame state here.
(infrun_thread_stop_requested, for_each_just_stopped_thread): Use
all_threads instead of ALL_NON_EXITED_THREADS.
(random_pending_event_thread): Use all_non_exited_threads instead
of ALL_NON_EXITED_THREADS. Use a lambda for repeated code.
(clean_up_just_stopped_threads_fsms): Use all_non_exited_threads
instead of ALL_NON_EXITED_THREADS.
(handle_no_resumed): Use all_non_exited_threads instead of
ALL_NON_EXITED_THREADS. Use all_inferiors instead of
ALL_INFERIORS.
(restart_threads, switch_back_to_stepped_thread): Use
all_non_exited_threads instead of ALL_NON_EXITED_THREADS.
* linux-nat.c (check_zombie_leaders): Replace ALL_INFERIORS with
all_inferiors.
(kill_unfollowed_fork_children): Use inf->non_exited_threads
instead of ALL_NON_EXITED_THREADS.
* linux-tdep.c (linux_make_corefile_notes): Use
inf->non_exited_threads instead of ALL_NON_EXITED_THREADS.
* linux-thread-db.c (thread_db_target::update_thread_list):
Replace ALL_INFERIORS with all_inferiors.
(thread_db_target::thread_handle_to_thread_info): Use
inf->non_exited_threads instead of ALL_NON_EXITED_THREADS.
* mi/mi-interp.c (multiple_inferiors_p): New.
(mi_on_resume_1): Simplify using all_non_exited_threads and
multiple_inferiors_p.
* mi/mi-main.c (mi_cmd_thread_list_ids): Use all_non_exited_threads
instead of ALL_NON_EXITED_THREADS.
* nto-procfs.c (nto_procfs_target::open): Don't call
init_thread_list here.
* record-btrace.c (record_btrace_target_open)
(record_btrace_target::stop_recording)
(record_btrace_target::close)
(record_btrace_target::record_is_replaying)
(record_btrace_target::resume, record_btrace_target::wait)
(record_btrace_target::record_stop_replaying): Use
all_non_exited_threads instead of ALL_NON_EXITED_THREADS.
* record-full.c (record_full_wait_1): Use all_non_exited_threads
instead of ALL_NON_EXITED_THREADS.
* regcache.c (cooked_read_test): Remove reference to global
thread_list.
* remote-sim.c (gdbsim_target::create_inferior): Don't call
init_thread_list here.
* remote.c (remote_target::update_thread_list): Use
all_threads_safe instead of ALL_NON_EXITED_THREADS.
(remote_target::process_initial_stop_replies): Replace
ALL_INFERIORS with all_non_exited_inferiors and use
all_non_exited_threads instead of ALL_NON_EXITED_THREADS.
(remote_target::open_1): Don't call init_thread_list here.
(remote_target::append_pending_thread_resumptions)
(remote_target::remote_resume_with_hc): Use all_non_exited_threads
instead of ALL_NON_EXITED_THREADS.
(remote_target::commit_resume)
(remote_target::remove_new_fork_children): Replace ALL_INFERIORS
with all_non_exited_inferiors and use all_non_exited_threads
instead of ALL_NON_EXITED_THREADS.
(remote_target::kill_new_fork_children): Use
all_non_exited_threads instead of ALL_NON_EXITED_THREADS. Remove
init_thread_list and init_wait_for_inferior calls.
(remote_target::remote_btrace_maybe_reopen)
(remote_target::thread_handle_to_thread_info): Use
all_non_exited_threads instead of ALL_NON_EXITED_THREADS.
* target.c (target_terminal::restore_inferior)
(target_terminal_is_ours_kind): Replace ALL_INFERIORS with
all_non_exited_inferiors.
* thread-iter.c: New file.
* thread-iter.h: New file.
* thread.c: Include "inline-frame.h".
(thread_list): Delete.
(clear_thread_inferior_resources): Call clear_inline_frame_state.
(init_thread_list): Use all_threads_safe instead of
ALL_THREADS_SAFE. Adjust to per-inferior thread lists.
(new_thread): Adjust to per-inferior thread lists.
(add_thread_silent): Pass inferior to find_thread_ptid.
(thread_info::deletable): New, moved from the header.
(delete_thread_1): Adjust to per-inferior thread lists.
(find_thread_global_id): Use inf->threads().
(find_thread_ptid): Use find_inferior_ptid and pass inferior to
find_thread_ptid.
(find_thread_ptid(inferior*, ptid_t)): New overload.
(iterate_over_threads): Use all_threads_safe.
(any_thread_p): New.
(thread_count): Use all_threads and std::distance.
(live_threads_count): Use all_non_exited_threads and
std::distance.
(valid_global_thread_id): Use all_threads.
(in_thread_list): Use find_thread_ptid.
(first_thread_of_inferior): Adjust to per-inferior thread lists.
(any_thread_of_inferior, any_live_thread_of_inferior): Use
inf->non_exited_threads().
(prune_threads, delete_exited_threads): Use all_threads_safe.
(thread_change_ptid): Pass inferior pointer to find_thread_ptid.
(set_resumed, set_running): Use all_non_exited_threads.
(is_thread_state, is_stopped, is_exited, is_running)
(is_executing): Delete.
(set_executing, set_stop_requested, finish_thread_state): Use
all_non_exited_threads.
(print_thread_info_1): Use all_inferiors and all_threads.
(thread_apply_all_command): Use all_non_exited_threads.
(thread_find_command): Use all_threads.
(update_threads_executing): Use all_non_exited_threads.
* tid-parse.c (parse_thread_id): Use inf->threads.
* x86-bsd-nat.c (x86bsd_dr_set): Use inf->non_exited_threads ().
2018-11-23 00:09:14 +08:00
|
|
|
for (thread_info *tp : all_non_exited_threads ())
|
2013-03-11 16:17:08 +08:00
|
|
|
btrace_clear (tp);
|
|
|
|
}
|
2013-03-11 16:28:58 +08:00
|
|
|
|
|
|
|
#if defined (HAVE_LIBEXPAT)
|
|
|
|
|
|
|
|
/* Check the btrace document version. */
|
|
|
|
|
|
|
|
static void
|
|
|
|
check_xml_btrace_version (struct gdb_xml_parser *parser,
|
|
|
|
const struct gdb_xml_element *element,
|
2018-01-07 22:29:52 +08:00
|
|
|
void *user_data,
|
|
|
|
std::vector<gdb_xml_value> &attributes)
|
2013-03-11 16:28:58 +08:00
|
|
|
{
|
Add some more casts (1/2)
Note: I needed to split this patch in two, otherwise it's too big for
the mailing list.
This patch adds explicit casts to situations where a void pointer is
assigned to a pointer to the "real" type. Building in C++ mode requires
those assignments to use an explicit cast. This includes, for example:
- callback arguments (cleanups, comparison functions, ...)
- data attached to some object (objfile, program space, etc) in the form
of a void pointer
- "user data" passed to some function
This patch comes from the commit "(mostly) auto-generated patch to insert
casts needed for C++", taken from Pedro's C++ branch.
Only files built on x86 with --enable-targets=all are modified, so the
native files for other arches will need to be dealt with separately.
I build-tested this with --enable-targets=all and reg-tested. To my
surprise, a test case (selftest.exp) had to be adjusted.
Here's the ChangeLog entry. Again, this was relatively quick to make
despite the length, thanks to David Malcolm's script, although I don't
believe it's very useful information in that particular case...
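To illustrate the kind of change involved (a hypothetical, self-contained
example, not code taken from GDB), a callback that receives its state through
a void * parameter needs the conversion back to the real type spelled out once
the file is compiled as C++:
#include <cstdio>
struct frame_cache
{
  unsigned long base;
};
/* A callback receiving its state as "user data" through a void *,
   like the cleanup and comparison callbacks mentioned above.  */
static void
print_cache (void *user_data)
{
  /* In C this conversion is implicit; C++ requires the explicit cast.  */
  struct frame_cache *cache = (struct frame_cache *) user_data;
  std::printf ("frame base: 0x%lx\n", cache->base);
}
int
main ()
{
  struct frame_cache cache = { 0xdeadbeef };
  print_cache (&cache);
  return 0;
}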
gdb/ChangeLog:
* aarch64-tdep.c (aarch64_make_prologue_cache): Add cast(s).
(aarch64_make_stub_cache): Likewise.
(value_of_aarch64_user_reg): Likewise.
* ada-lang.c (ada_inferior_data_cleanup): Likewise.
(get_ada_inferior_data): Likewise.
(get_ada_pspace_data): Likewise.
(ada_pspace_data_cleanup): Likewise.
(ada_complete_symbol_matcher): Likewise.
(ada_exc_search_name_matches): Likewise.
* ada-tasks.c (get_ada_tasks_pspace_data): Likewise.
(get_ada_tasks_inferior_data): Likewise.
* addrmap.c (addrmap_mutable_foreach_worker): Likewise.
(splay_obstack_alloc): Likewise.
(splay_obstack_free): Likewise.
* alpha-linux-tdep.c (alpha_linux_supply_gregset): Likewise.
(alpha_linux_collect_gregset): Likewise.
(alpha_linux_supply_fpregset): Likewise.
(alpha_linux_collect_fpregset): Likewise.
* alpha-mdebug-tdep.c (alpha_mdebug_frame_unwind_cache): Likewise.
* alpha-tdep.c (alpha_lds): Likewise.
(alpha_sts): Likewise.
(alpha_sigtramp_frame_unwind_cache): Likewise.
(alpha_heuristic_frame_unwind_cache): Likewise.
(alpha_supply_int_regs): Likewise.
(alpha_fill_int_regs): Likewise.
(alpha_supply_fp_regs): Likewise.
(alpha_fill_fp_regs): Likewise.
* alphanbsd-tdep.c (alphanbsd_supply_fpregset): Likewise.
(alphanbsd_aout_supply_gregset): Likewise.
(alphanbsd_supply_gregset): Likewise.
* amd64-linux-tdep.c (amd64_linux_init_abi): Likewise.
(amd64_x32_linux_init_abi): Likewise.
* amd64-nat.c (amd64_supply_native_gregset): Likewise.
(amd64_collect_native_gregset): Likewise.
* amd64-tdep.c (amd64_frame_cache): Likewise.
(amd64_sigtramp_frame_cache): Likewise.
(amd64_epilogue_frame_cache): Likewise.
(amd64_supply_fxsave): Likewise.
(amd64_supply_xsave): Likewise.
(amd64_collect_fxsave): Likewise.
(amd64_collect_xsave): Likewise.
* amd64-windows-tdep.c (amd64_windows_frame_cache): Likewise.
* amd64obsd-tdep.c (amd64obsd_trapframe_cache): Likewise.
* arm-linux-tdep.c (arm_linux_supply_gregset): Likewise.
(arm_linux_collect_gregset): Likewise.
(arm_linux_supply_nwfpe): Likewise.
(arm_linux_collect_nwfpe): Likewise.
(arm_linux_supply_vfp): Likewise.
(arm_linux_collect_vfp): Likewise.
* arm-tdep.c (arm_find_mapping_symbol): Likewise.
(arm_prologue_unwind_stop_reason): Likewise.
(arm_prologue_this_id): Likewise.
(arm_prologue_prev_register): Likewise.
(arm_exidx_data_free): Likewise.
(arm_find_exidx_entry): Likewise.
(arm_stub_this_id): Likewise.
(arm_m_exception_this_id): Likewise.
(arm_m_exception_prev_register): Likewise.
(arm_normal_frame_base): Likewise.
(gdb_print_insn_arm): Likewise.
(arm_objfile_data_free): Likewise.
(arm_record_special_symbol): Likewise.
(value_of_arm_user_reg): Likewise.
* armbsd-tdep.c (armbsd_supply_fpregset): Likewise.
(armbsd_supply_gregset): Likewise.
* auto-load.c (auto_load_pspace_data_cleanup): Likewise.
(get_auto_load_pspace_data): Likewise.
(hash_loaded_script_entry): Likewise.
(eq_loaded_script_entry): Likewise.
(clear_section_scripts): Likewise.
(collect_matching_scripts): Likewise.
* auxv.c (auxv_inferior_data_cleanup): Likewise.
(get_auxv_inferior_data): Likewise.
* avr-tdep.c (avr_frame_unwind_cache): Likewise.
* ax-general.c (do_free_agent_expr_cleanup): Likewise.
* bfd-target.c (target_bfd_xfer_partial): Likewise.
(target_bfd_xclose): Likewise.
(target_bfd_get_section_table): Likewise.
* bfin-tdep.c (bfin_frame_cache): Likewise.
* block.c (find_block_in_blockvector): Likewise.
(call_site_for_pc): Likewise.
(block_find_non_opaque_type_preferred): Likewise.
* break-catch-sig.c (signal_catchpoint_insert_location): Likewise.
(signal_catchpoint_remove_location): Likewise.
(signal_catchpoint_breakpoint_hit): Likewise.
(signal_catchpoint_print_one): Likewise.
(signal_catchpoint_print_mention): Likewise.
(signal_catchpoint_print_recreate): Likewise.
* break-catch-syscall.c (get_catch_syscall_inferior_data): Likewise.
* breakpoint.c (do_cleanup_counted_command_line): Likewise.
(bp_location_compare_addrs): Likewise.
(get_first_locp_gte_addr): Likewise.
(check_tracepoint_command): Likewise.
(do_map_commands_command): Likewise.
(get_breakpoint_objfile_data): Likewise.
(free_breakpoint_probes): Likewise.
(do_captured_breakpoint_query): Likewise.
(compare_breakpoints): Likewise.
(bp_location_compare): Likewise.
(bpstat_remove_breakpoint_callback): Likewise.
(do_delete_breakpoint_cleanup): Likewise.
* bsd-uthread.c (bsd_uthread_set_supply_uthread): Likewise.
(bsd_uthread_set_collect_uthread): Likewise.
(bsd_uthread_activate): Likewise.
(bsd_uthread_fetch_registers): Likewise.
(bsd_uthread_store_registers): Likewise.
* btrace.c (check_xml_btrace_version): Likewise.
(parse_xml_btrace_block): Likewise.
(parse_xml_btrace_pt_config_cpu): Likewise.
(parse_xml_btrace_pt_raw): Likewise.
(parse_xml_btrace_pt): Likewise.
(parse_xml_btrace_conf_bts): Likewise.
(parse_xml_btrace_conf_pt): Likewise.
(do_btrace_data_cleanup): Likewise.
* c-typeprint.c (find_typedef_for_canonicalize): Likewise.
* charset.c (cleanup_iconv): Likewise.
(do_cleanup_iterator): Likewise.
* cli-out.c (cli_uiout_dtor): Likewise.
(cli_table_begin): Likewise.
(cli_table_body): Likewise.
(cli_table_end): Likewise.
(cli_table_header): Likewise.
(cli_begin): Likewise.
(cli_end): Likewise.
(cli_field_int): Likewise.
(cli_field_skip): Likewise.
(cli_field_string): Likewise.
(cli_field_fmt): Likewise.
(cli_spaces): Likewise.
(cli_text): Likewise.
(cli_message): Likewise.
(cli_wrap_hint): Likewise.
(cli_flush): Likewise.
(cli_redirect): Likewise.
(out_field_fmt): Likewise.
(field_separator): Likewise.
(cli_out_set_stream): Likewise.
* cli/cli-cmds.c (compare_symtabs): Likewise.
* cli/cli-dump.c (call_dump_func): Likewise.
(restore_section_callback): Likewise.
* cli/cli-script.c (clear_hook_in_cleanup): Likewise.
(do_restore_user_call_depth): Likewise.
(do_free_command_lines_cleanup): Likewise.
* coff-pe-read.c (get_section_vmas): Likewise.
(pe_as16): Likewise.
(pe_as32): Likewise.
* coffread.c (coff_symfile_read): Likewise.
* common/agent.c (agent_look_up_symbols): Likewise.
* common/filestuff.c (do_close_cleanup): Likewise.
* common/format.c (free_format_pieces_cleanup): Likewise.
* common/vec.c (vec_o_reserve): Likewise.
* compile/compile-c-support.c (print_one_macro): Likewise.
* compile/compile-c-symbols.c (hash_symbol_error): Likewise.
(eq_symbol_error): Likewise.
(del_symbol_error): Likewise.
(error_symbol_once): Likewise.
(gcc_convert_symbol): Likewise.
(gcc_symbol_address): Likewise.
(hash_symname): Likewise.
(eq_symname): Likewise.
* compile/compile-c-types.c (hash_type_map_instance): Likewise.
(eq_type_map_instance): Likewise.
(insert_type): Likewise.
(convert_type): Likewise.
* compile/compile-object-load.c (munmap_listp_free_cleanup): Likewise.
(setup_sections): Likewise.
(link_hash_table_free): Likewise.
(copy_sections): Likewise.
* compile/compile-object-run.c (do_module_cleanup): Likewise.
* compile/compile.c (compile_print_value): Likewise.
(do_rmdir): Likewise.
(cleanup_compile_instance): Likewise.
(cleanup_unlink_file): Likewise.
* completer.c (free_completion_tracker): Likewise.
* corelow.c (add_to_spuid_list): Likewise.
* cp-namespace.c (reset_directive_searched): Likewise.
* cp-support.c (reset_directive_searched): Likewise.
* cris-tdep.c (cris_sigtramp_frame_unwind_cache): Likewise.
(cris_frame_unwind_cache): Likewise.
* d-lang.c (builtin_d_type): Likewise.
* d-namespace.c (reset_directive_searched): Likewise.
* dbxread.c (dbx_free_symfile_info): Likewise.
(do_free_bincl_list_cleanup): Likewise.
* disasm.c (hash_dis_line_entry): Likewise.
(eq_dis_line_entry): Likewise.
(dis_asm_print_address): Likewise.
(fprintf_disasm): Likewise.
(do_ui_file_delete): Likewise.
* doublest.c (convert_floatformat_to_doublest): Likewise.
* dummy-frame.c (pop_dummy_frame_bpt): Likewise.
(dummy_frame_prev_register): Likewise.
(dummy_frame_this_id): Likewise.
* dwarf2-frame-tailcall.c (cache_hash): Likewise.
(cache_eq): Likewise.
(cache_find): Likewise.
(tailcall_frame_this_id): Likewise.
(dwarf2_tailcall_prev_register_first): Likewise.
(tailcall_frame_prev_register): Likewise.
(tailcall_frame_dealloc_cache): Likewise.
(tailcall_frame_prev_arch): Likewise.
* dwarf2-frame.c (dwarf2_frame_state_free): Likewise.
(dwarf2_frame_set_init_reg): Likewise.
(dwarf2_frame_init_reg): Likewise.
(dwarf2_frame_set_signal_frame_p): Likewise.
(dwarf2_frame_signal_frame_p): Likewise.
(dwarf2_frame_set_adjust_regnum): Likewise.
(dwarf2_frame_adjust_regnum): Likewise.
(clear_pointer_cleanup): Likewise.
(dwarf2_frame_cache): Likewise.
(find_cie): Likewise.
(dwarf2_frame_find_fde): Likewise.
* dwarf2expr.c (dwarf_expr_address_type): Likewise.
(free_dwarf_expr_context_cleanup): Likewise.
* dwarf2loc.c (locexpr_find_frame_base_location): Likewise.
(locexpr_get_frame_base): Likewise.
(loclist_find_frame_base_location): Likewise.
(loclist_get_frame_base): Likewise.
(dwarf_expr_dwarf_call): Likewise.
(dwarf_expr_get_base_type): Likewise.
(dwarf_expr_push_dwarf_reg_entry_value): Likewise.
(dwarf_expr_get_obj_addr): Likewise.
(entry_data_value_coerce_ref): Likewise.
(entry_data_value_copy_closure): Likewise.
(entry_data_value_free_closure): Likewise.
(get_frame_address_in_block_wrapper): Likewise.
(dwarf2_evaluate_property): Likewise.
(dwarf2_compile_property_to_c): Likewise.
(needs_frame_read_addr_from_reg): Likewise.
(needs_frame_get_reg_value): Likewise.
(needs_frame_frame_base): Likewise.
(needs_frame_frame_cfa): Likewise.
(needs_frame_tls_address): Likewise.
(needs_frame_dwarf_call): Likewise.
(needs_dwarf_reg_entry_value): Likewise.
(get_ax_pc): Likewise.
(locexpr_read_variable): Likewise.
(locexpr_read_variable_at_entry): Likewise.
(locexpr_read_needs_frame): Likewise.
(locexpr_describe_location): Likewise.
(locexpr_tracepoint_var_ref): Likewise.
(locexpr_generate_c_location): Likewise.
(loclist_read_variable): Likewise.
(loclist_read_variable_at_entry): Likewise.
(loclist_describe_location): Likewise.
(loclist_tracepoint_var_ref): Likewise.
(loclist_generate_c_location): Likewise.
* dwarf2read.c (line_header_hash_voidp): Likewise.
(line_header_eq_voidp): Likewise.
(dwarf2_has_info): Likewise.
(dwarf2_get_section_info): Likewise.
(locate_dwz_sections): Likewise.
(hash_file_name_entry): Likewise.
(eq_file_name_entry): Likewise.
(delete_file_name_entry): Likewise.
(dw2_setup): Likewise.
(dw2_get_file_names_reader): Likewise.
(dw2_find_pc_sect_compunit_symtab): Likewise.
(hash_signatured_type): Likewise.
(eq_signatured_type): Likewise.
(add_signatured_type_cu_to_table): Likewise.
(create_debug_types_hash_table): Likewise.
(lookup_dwo_signatured_type): Likewise.
(lookup_dwp_signatured_type): Likewise.
(lookup_signatured_type): Likewise.
(hash_type_unit_group): Likewise.
(eq_type_unit_group): Likewise.
(get_type_unit_group): Likewise.
(process_psymtab_comp_unit_reader): Likewise.
(sort_tu_by_abbrev_offset): Likewise.
(process_skeletonless_type_unit): Likewise.
(psymtabs_addrmap_cleanup): Likewise.
(dwarf2_read_symtab): Likewise.
(psymtab_to_symtab_1): Likewise.
(die_hash): Likewise.
(die_eq): Likewise.
(load_full_comp_unit_reader): Likewise.
(reset_die_in_process): Likewise.
(free_cu_line_header): Likewise.
(handle_DW_AT_stmt_list): Likewise.
(hash_dwo_file): Likewise.
(eq_dwo_file): Likewise.
(hash_dwo_unit): Likewise.
(eq_dwo_unit): Likewise.
(create_dwo_cu_reader): Likewise.
(create_dwo_unit_in_dwp_v1): Likewise.
(create_dwo_unit_in_dwp_v2): Likewise.
(lookup_dwo_unit_in_dwp): Likewise.
(dwarf2_locate_dwo_sections): Likewise.
(dwarf2_locate_common_dwp_sections): Likewise.
(dwarf2_locate_v2_dwp_sections): Likewise.
(hash_dwp_loaded_cutus): Likewise.
(eq_dwp_loaded_cutus): Likewise.
(lookup_dwo_cutu): Likewise.
(abbrev_table_free_cleanup): Likewise.
(dwarf2_free_abbrev_table): Likewise.
(find_partial_die_in_comp_unit): Likewise.
(free_line_header_voidp): Likewise.
(follow_die_offset): Likewise.
(follow_die_sig_1): Likewise.
(free_heap_comp_unit): Likewise.
(free_stack_comp_unit): Likewise.
(dwarf2_free_objfile): Likewise.
(per_cu_offset_and_type_hash): Likewise.
(per_cu_offset_and_type_eq): Likewise.
(get_die_type_at_offset): Likewise.
(partial_die_hash): Likewise.
(partial_die_eq): Likewise.
(dwarf2_per_objfile_free): Likewise.
(hash_strtab_entry): Likewise.
(eq_strtab_entry): Likewise.
(add_string): Likewise.
(hash_symtab_entry): Likewise.
(eq_symtab_entry): Likewise.
(delete_symtab_entry): Likewise.
(cleanup_mapped_symtab): Likewise.
(add_indices_to_cpool): Likewise.
(hash_psymtab_cu_index): Likewise.
(eq_psymtab_cu_index): Likewise.
(add_address_entry_worker): Likewise.
(unlink_if_set): Likewise.
(write_one_signatured_type): Likewise.
(save_gdb_index_command): Likewise.
* elfread.c (elf_symtab_read): Likewise.
(elf_gnu_ifunc_cache_hash): Likewise.
(elf_gnu_ifunc_cache_eq): Likewise.
(elf_gnu_ifunc_record_cache): Likewise.
(elf_gnu_ifunc_resolve_by_cache): Likewise.
(elf_get_probes): Likewise.
(probe_key_free): Likewise.
* f-lang.c (builtin_f_type): Likewise.
* frame-base.c (frame_base_append_sniffer): Likewise.
(frame_base_set_default): Likewise.
(frame_base_find_by_frame): Likewise.
* frame-unwind.c (frame_unwind_prepend_unwinder): Likewise.
(frame_unwind_append_unwinder): Likewise.
(frame_unwind_find_by_frame): Likewise.
* frame.c (frame_addr_hash): Likewise.
(frame_addr_hash_eq): Likewise.
(frame_stash_find): Likewise.
(do_frame_register_read): Likewise.
(unwind_to_current_frame): Likewise.
(frame_cleanup_after_sniffer): Likewise.
* frv-linux-tdep.c (frv_linux_sigtramp_frame_cache): Likewise.
* frv-tdep.c (frv_frame_unwind_cache): Likewise.
* ft32-tdep.c (ft32_frame_cache): Likewise.
* gcore.c (do_bfd_delete_cleanup): Likewise.
(gcore_create_callback): Likewise.
* gdb_bfd.c (hash_bfd): Likewise.
(eq_bfd): Likewise.
(gdb_bfd_open): Likewise.
(free_one_bfd_section): Likewise.
(gdb_bfd_ref): Likewise.
(gdb_bfd_unref): Likewise.
(get_section_descriptor): Likewise.
(gdb_bfd_map_section): Likewise.
(gdb_bfd_crc): Likewise.
(gdb_bfd_mark_parent): Likewise.
(gdb_bfd_record_inclusion): Likewise.
(gdb_bfd_requires_relocations): Likewise.
(print_one_bfd): Likewise.
* gdbtypes.c (type_pair_hash): Likewise.
(type_pair_eq): Likewise.
(builtin_type): Likewise.
(objfile_type): Likewise.
* gnu-v3-abi.c (vtable_ptrdiff_type): Likewise.
(vtable_address_point_offset): Likewise.
(gnuv3_get_vtable): Likewise.
(hash_value_and_voffset): Likewise.
(eq_value_and_voffset): Likewise.
(compare_value_and_voffset): Likewise.
(compute_vtable_size): Likewise.
(gnuv3_get_typeid_type): Likewise.
* go-lang.c (builtin_go_type): Likewise.
* guile/scm-block.c (bkscm_hash_block_smob): Likewise.
(bkscm_eq_block_smob): Likewise.
(bkscm_objfile_block_map): Likewise.
(bkscm_del_objfile_blocks): Likewise.
* guile/scm-breakpoint.c (bpscm_build_bp_list): Likewise.
* guile/scm-disasm.c (gdbscm_disasm_read_memory_worker): Likewise.
(gdbscm_disasm_print_address): Likewise.
* guile/scm-frame.c (frscm_hash_frame_smob): Likewise.
(frscm_eq_frame_smob): Likewise.
(frscm_inferior_frame_map): Likewise.
(frscm_del_inferior_frames): Likewise.
* guile/scm-gsmob.c (gdbscm_add_objfile_ref): Likewise.
* guile/scm-objfile.c (ofscm_handle_objfile_deleted): Likewise.
(ofscm_objfile_smob_from_objfile): Likewise.
* guile/scm-ports.c (ioscm_write): Likewise.
(ioscm_file_port_delete): Likewise.
(ioscm_file_port_rewind): Likewise.
(ioscm_file_port_put): Likewise.
(ioscm_file_port_write): Likewise.
* guile/scm-progspace.c (psscm_handle_pspace_deleted): Likewise.
(psscm_pspace_smob_from_pspace): Likewise.
* guile/scm-safe-call.c (scscm_recording_pre_unwind_handler): Likewise.
(scscm_recording_unwind_handler): Likewise.
(gdbscm_with_catch): Likewise.
(scscm_call_0_body): Likewise.
(scscm_call_1_body): Likewise.
(scscm_call_2_body): Likewise.
(scscm_call_3_body): Likewise.
(scscm_call_4_body): Likewise.
(scscm_apply_1_body): Likewise.
(scscm_eval_scheme_string): Likewise.
(gdbscm_safe_eval_string): Likewise.
(scscm_source_scheme_script): Likewise.
(gdbscm_safe_source_script): Likewise.
* guile/scm-string.c (gdbscm_call_scm_to_stringn): Likewise.
(gdbscm_call_scm_from_stringn): Likewise.
* guile/scm-symbol.c (syscm_hash_symbol_smob): Likewise.
(syscm_eq_symbol_smob): Likewise.
(syscm_get_symbol_map): Likewise.
(syscm_del_objfile_symbols): Likewise.
* guile/scm-symtab.c (stscm_hash_symtab_smob): Likewise.
(stscm_eq_symtab_smob): Likewise.
(stscm_objfile_symtab_map): Likewise.
(stscm_del_objfile_symtabs): Likewise.
* guile/scm-type.c (tyscm_hash_type_smob): Likewise.
(tyscm_eq_type_smob): Likewise.
(tyscm_type_map): Likewise.
(tyscm_copy_type_recursive): Likewise.
(save_objfile_types): Likewise.
* guile/scm-utils.c (extract_arg): Likewise.
* h8300-tdep.c (h8300_frame_cache): Likewise.
* hppa-linux-tdep.c (hppa_linux_sigtramp_frame_unwind_cache): Likewise.
* hppa-tdep.c (compare_unwind_entries): Likewise.
(find_unwind_entry): Likewise.
(hppa_frame_cache): Likewise.
(hppa_stub_frame_unwind_cache): Likewise.
* hppanbsd-tdep.c (hppanbsd_supply_gregset): Likewise.
* hppaobsd-tdep.c (hppaobsd_supply_gregset): Likewise.
(hppaobsd_supply_fpregset): Likewise.
* i386-cygwin-tdep.c (core_process_module_section): Likewise.
* i386-linux-tdep.c (i386_linux_init_abi): Likewise.
* i386-tdep.c (i386_frame_cache): Likewise.
(i386_epilogue_frame_cache): Likewise.
(i386_sigtramp_frame_cache): Likewise.
(i386_supply_gregset): Likewise.
(i386_collect_gregset): Likewise.
(i386_gdbarch_init): Likewise.
* i386obsd-tdep.c (i386obsd_aout_supply_regset): Likewise.
(i386obsd_trapframe_cache): Likewise.
* i387-tdep.c (i387_supply_fsave): Likewise.
(i387_collect_fsave): Likewise.
(i387_supply_fxsave): Likewise.
(i387_collect_fxsave): Likewise.
(i387_supply_xsave): Likewise.
(i387_collect_xsave): Likewise.
* ia64-tdep.c (ia64_frame_cache): Likewise.
(ia64_sigtramp_frame_cache): Likewise.
* infcmd.c (attach_command_continuation): Likewise.
(attach_command_continuation_free_args): Likewise.
* inferior.c (restore_inferior): Likewise.
(delete_thread_of_inferior): Likewise.
* inflow.c (inflow_inferior_data_cleanup): Likewise.
(get_inflow_inferior_data): Likewise.
(inflow_inferior_exit): Likewise.
* infrun.c (displaced_step_clear_cleanup): Likewise.
(restore_current_uiout_cleanup): Likewise.
(release_stop_context_cleanup): Likewise.
(do_restore_infcall_suspend_state_cleanup): Likewise.
(do_restore_infcall_control_state_cleanup): Likewise.
(restore_inferior_ptid): Likewise.
* inline-frame.c (block_starting_point_at): Likewise.
* iq2000-tdep.c (iq2000_frame_cache): Likewise.
* jit.c (get_jit_objfile_data): Likewise.
(get_jit_program_space_data): Likewise.
(jit_object_close_impl): Likewise.
(jit_find_objf_with_entry_addr): Likewise.
(jit_breakpoint_deleted): Likewise.
(jit_unwind_reg_set_impl): Likewise.
(jit_unwind_reg_get_impl): Likewise.
(jit_dealloc_cache): Likewise.
(jit_frame_sniffer): Likewise.
(jit_frame_prev_register): Likewise.
(jit_prepend_unwinder): Likewise.
(jit_inferior_exit_hook): Likewise.
(free_objfile_data): Likewise.
* jv-lang.c (jv_per_objfile_free): Likewise.
(get_dynamics_objfile): Likewise.
(get_java_class_symtab): Likewise.
(builtin_java_type): Likewise.
* language.c (language_string_char_type): Likewise.
(language_bool_type): Likewise.
(language_lookup_primitive_type): Likewise.
(language_lookup_primitive_type_as_symbol): Likewise.
* linespec.c (hash_address_entry): Likewise.
(eq_address_entry): Likewise.
(iterate_inline_only): Likewise.
(iterate_name_matcher): Likewise.
(decode_line_2_compare_items): Likewise.
(collect_one_symbol): Likewise.
(compare_symbols): Likewise.
(compare_msymbols): Likewise.
(add_symtabs_to_list): Likewise.
(collect_symbols): Likewise.
(compare_msyms): Likewise.
(add_minsym): Likewise.
(cleanup_linespec_result): Likewise.
* linux-fork.c (inferior_call_waitpid_cleanup): Likewise.
* linux-nat.c (delete_lwp_cleanup): Likewise.
(count_events_callback): Likewise.
(select_event_lwp_callback): Likewise.
(resume_stopped_resumed_lwps): Likewise.
* linux-tdep.c (get_linux_gdbarch_data): Likewise.
(invalidate_linux_cache_inf): Likewise.
(get_linux_inferior_data): Likewise.
(linux_find_memory_regions_thunk): Likewise.
(linux_make_mappings_callback): Likewise.
(linux_corefile_thread_callback): Likewise.
(find_mapping_size): Likewise.
* linux-thread-db.c (find_new_threads_callback): Likewise.
* lm32-tdep.c (lm32_frame_cache): Likewise.
* m2-lang.c (builtin_m2_type): Likewise.
* m32c-tdep.c (m32c_analyze_frame_prologue): Likewise.
* m32r-linux-tdep.c (m32r_linux_sigtramp_frame_cache): Likewise.
(m32r_linux_supply_gregset): Likewise.
(m32r_linux_collect_gregset): Likewise.
* m32r-tdep.c (m32r_frame_unwind_cache): Likewise.
* m68hc11-tdep.c (m68hc11_frame_unwind_cache): Likewise.
* m68k-tdep.c (m68k_frame_cache): Likewise.
* m68kbsd-tdep.c (m68kbsd_supply_fpregset): Likewise.
(m68kbsd_supply_gregset): Likewise.
* m68klinux-tdep.c (m68k_linux_sigtramp_frame_cache): Likewise.
* m88k-tdep.c (m88k_frame_cache): Likewise.
(m88k_supply_gregset): Likewise.
gdb/gdbserver/ChangeLog:
* dll.c (match_dll): Add cast(s).
(unloaded_dll): Likewise.
* linux-low.c (second_thread_of_pid_p): Likewise.
(delete_lwp_callback): Likewise.
(count_events_callback): Likewise.
(select_event_lwp_callback): Likewise.
(linux_set_resume_request): Likewise.
* server.c (accumulate_file_name_length): Likewise.
(emit_dll_description): Likewise.
(handle_qxfer_threads_worker): Likewise.
(visit_actioned_threads): Likewise.
* thread-db.c (any_thread_of): Likewise.
* tracepoint.c (same_process_p): Likewise.
(match_blocktype): Likewise.
(build_traceframe_info_xml): Likewise.
gdb/testsuite/ChangeLog:
* gdb.gdb/selftest.exp (do_steps_and_nexts): Adjust expected
source line.
2015-09-26 02:08:07 +08:00
|
|
|
const char *version
|
2018-01-07 22:29:52 +08:00
|
|
|
= (const char *) xml_find_attribute (attributes, "version")->value.get ();
|
2013-03-11 16:28:58 +08:00
|
|
|
|
|
|
|
if (strcmp (version, "1.0") != 0)
|
|
|
|
gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Parse a btrace "block" xml record. */
|
|
|
|
|
|
|
|
static void
|
|
|
|
parse_xml_btrace_block (struct gdb_xml_parser *parser,
|
|
|
|
const struct gdb_xml_element *element,
|
2018-01-07 22:29:52 +08:00
|
|
|
void *user_data,
|
|
|
|
std::vector<gdb_xml_value> &attributes)
|
2013-03-11 16:28:58 +08:00
|
|
|
{
|
2013-11-13 22:31:07 +08:00
|
|
|
struct btrace_data *btrace;
|
2013-03-11 16:28:58 +08:00
|
|
|
ULONGEST *begin, *end;
|
|
|
|
|
Add some more casts (1/2)
Note: I needed to split this patch in two, otherwise it's too big for
the mailing list.
This patch adds explicit casts to situations where a void pointer is
assigned to a pointer to the "real" type. Building in C++ mode requires
those assignments to use an explicit cast. This includes, for example:
- callback arguments (cleanups, comparison functions, ...)
- data attached to some object (objfile, program space, etc) in the form
of a void pointer
- "user data" passed to some function
This patch comes from the commit "(mostly) auto-generated patch to insert
casts needed for C++", taken from Pedro's C++ branch.
Only files built on x86 with --enable-targets=all are modified, so the
native files for other arches will need to be dealt with separately.
I build-tested this with --enable-targets=all and reg-tested. To my
surprise, a test case (selftest.exp) had to be adjusted.
Here's the ChangeLog entry. Again, this was relatively quick to make
despite the length, thanks to David Malcolm's script, although I don't
believe it's very useful information in that particular case...
gdb/ChangeLog:
* aarch64-tdep.c (aarch64_make_prologue_cache): Add cast(s).
(aarch64_make_stub_cache): Likewise.
(value_of_aarch64_user_reg): Likewise.
* ada-lang.c (ada_inferior_data_cleanup): Likewise.
(get_ada_inferior_data): Likewise.
(get_ada_pspace_data): Likewise.
(ada_pspace_data_cleanup): Likewise.
(ada_complete_symbol_matcher): Likewise.
(ada_exc_search_name_matches): Likewise.
* ada-tasks.c (get_ada_tasks_pspace_data): Likewise.
(get_ada_tasks_inferior_data): Likewise.
* addrmap.c (addrmap_mutable_foreach_worker): Likewise.
(splay_obstack_alloc): Likewise.
(splay_obstack_free): Likewise.
* alpha-linux-tdep.c (alpha_linux_supply_gregset): Likewise.
(alpha_linux_collect_gregset): Likewise.
(alpha_linux_supply_fpregset): Likewise.
(alpha_linux_collect_fpregset): Likewise.
* alpha-mdebug-tdep.c (alpha_mdebug_frame_unwind_cache): Likewise.
* alpha-tdep.c (alpha_lds): Likewise.
(alpha_sts): Likewise.
(alpha_sigtramp_frame_unwind_cache): Likewise.
(alpha_heuristic_frame_unwind_cache): Likewise.
(alpha_supply_int_regs): Likewise.
(alpha_fill_int_regs): Likewise.
(alpha_supply_fp_regs): Likewise.
(alpha_fill_fp_regs): Likewise.
* alphanbsd-tdep.c (alphanbsd_supply_fpregset): Likewise.
(alphanbsd_aout_supply_gregset): Likewise.
(alphanbsd_supply_gregset): Likewise.
* amd64-linux-tdep.c (amd64_linux_init_abi): Likewise.
(amd64_x32_linux_init_abi): Likewise.
* amd64-nat.c (amd64_supply_native_gregset): Likewise.
(amd64_collect_native_gregset): Likewise.
* amd64-tdep.c (amd64_frame_cache): Likewise.
(amd64_sigtramp_frame_cache): Likewise.
(amd64_epilogue_frame_cache): Likewise.
(amd64_supply_fxsave): Likewise.
(amd64_supply_xsave): Likewise.
(amd64_collect_fxsave): Likewise.
(amd64_collect_xsave): Likewise.
* amd64-windows-tdep.c (amd64_windows_frame_cache): Likewise.
* amd64obsd-tdep.c (amd64obsd_trapframe_cache): Likewise.
* arm-linux-tdep.c (arm_linux_supply_gregset): Likewise.
(arm_linux_collect_gregset): Likewise.
(arm_linux_supply_nwfpe): Likewise.
(arm_linux_collect_nwfpe): Likewise.
(arm_linux_supply_vfp): Likewise.
(arm_linux_collect_vfp): Likewise.
* arm-tdep.c (arm_find_mapping_symbol): Likewise.
(arm_prologue_unwind_stop_reason): Likewise.
(arm_prologue_this_id): Likewise.
(arm_prologue_prev_register): Likewise.
(arm_exidx_data_free): Likewise.
(arm_find_exidx_entry): Likewise.
(arm_stub_this_id): Likewise.
(arm_m_exception_this_id): Likewise.
(arm_m_exception_prev_register): Likewise.
(arm_normal_frame_base): Likewise.
(gdb_print_insn_arm): Likewise.
(arm_objfile_data_free): Likewise.
(arm_record_special_symbol): Likewise.
(value_of_arm_user_reg): Likewise.
* armbsd-tdep.c (armbsd_supply_fpregset): Likewise.
(armbsd_supply_gregset): Likewise.
* auto-load.c (auto_load_pspace_data_cleanup): Likewise.
(get_auto_load_pspace_data): Likewise.
(hash_loaded_script_entry): Likewise.
(eq_loaded_script_entry): Likewise.
(clear_section_scripts): Likewise.
(collect_matching_scripts): Likewise.
* auxv.c (auxv_inferior_data_cleanup): Likewise.
(get_auxv_inferior_data): Likewise.
* avr-tdep.c (avr_frame_unwind_cache): Likewise.
* ax-general.c (do_free_agent_expr_cleanup): Likewise.
* bfd-target.c (target_bfd_xfer_partial): Likewise.
(target_bfd_xclose): Likewise.
(target_bfd_get_section_table): Likewise.
* bfin-tdep.c (bfin_frame_cache): Likewise.
* block.c (find_block_in_blockvector): Likewise.
(call_site_for_pc): Likewise.
(block_find_non_opaque_type_preferred): Likewise.
* break-catch-sig.c (signal_catchpoint_insert_location): Likewise.
(signal_catchpoint_remove_location): Likewise.
(signal_catchpoint_breakpoint_hit): Likewise.
(signal_catchpoint_print_one): Likewise.
(signal_catchpoint_print_mention): Likewise.
(signal_catchpoint_print_recreate): Likewise.
* break-catch-syscall.c (get_catch_syscall_inferior_data): Likewise.
* breakpoint.c (do_cleanup_counted_command_line): Likewise.
(bp_location_compare_addrs): Likewise.
(get_first_locp_gte_addr): Likewise.
(check_tracepoint_command): Likewise.
(do_map_commands_command): Likewise.
(get_breakpoint_objfile_data): Likewise.
(free_breakpoint_probes): Likewise.
(do_captured_breakpoint_query): Likewise.
(compare_breakpoints): Likewise.
(bp_location_compare): Likewise.
(bpstat_remove_breakpoint_callback): Likewise.
(do_delete_breakpoint_cleanup): Likewise.
* bsd-uthread.c (bsd_uthread_set_supply_uthread): Likewise.
(bsd_uthread_set_collect_uthread): Likewise.
(bsd_uthread_activate): Likewise.
(bsd_uthread_fetch_registers): Likewise.
(bsd_uthread_store_registers): Likewise.
* btrace.c (check_xml_btrace_version): Likewise.
(parse_xml_btrace_block): Likewise.
(parse_xml_btrace_pt_config_cpu): Likewise.
(parse_xml_btrace_pt_raw): Likewise.
(parse_xml_btrace_pt): Likewise.
(parse_xml_btrace_conf_bts): Likewise.
(parse_xml_btrace_conf_pt): Likewise.
(do_btrace_data_cleanup): Likewise.
* c-typeprint.c (find_typedef_for_canonicalize): Likewise.
* charset.c (cleanup_iconv): Likewise.
(do_cleanup_iterator): Likewise.
* cli-out.c (cli_uiout_dtor): Likewise.
(cli_table_begin): Likewise.
(cli_table_body): Likewise.
(cli_table_end): Likewise.
(cli_table_header): Likewise.
(cli_begin): Likewise.
(cli_end): Likewise.
(cli_field_int): Likewise.
(cli_field_skip): Likewise.
(cli_field_string): Likewise.
(cli_field_fmt): Likewise.
(cli_spaces): Likewise.
(cli_text): Likewise.
(cli_message): Likewise.
(cli_wrap_hint): Likewise.
(cli_flush): Likewise.
(cli_redirect): Likewise.
(out_field_fmt): Likewise.
(field_separator): Likewise.
(cli_out_set_stream): Likewise.
* cli/cli-cmds.c (compare_symtabs): Likewise.
* cli/cli-dump.c (call_dump_func): Likewise.
(restore_section_callback): Likewise.
* cli/cli-script.c (clear_hook_in_cleanup): Likewise.
(do_restore_user_call_depth): Likewise.
(do_free_command_lines_cleanup): Likewise.
* coff-pe-read.c (get_section_vmas): Likewise.
(pe_as16): Likewise.
(pe_as32): Likewise.
* coffread.c (coff_symfile_read): Likewise.
* common/agent.c (agent_look_up_symbols): Likewise.
* common/filestuff.c (do_close_cleanup): Likewise.
* common/format.c (free_format_pieces_cleanup): Likewise.
* common/vec.c (vec_o_reserve): Likewise.
* compile/compile-c-support.c (print_one_macro): Likewise.
* compile/compile-c-symbols.c (hash_symbol_error): Likewise.
(eq_symbol_error): Likewise.
(del_symbol_error): Likewise.
(error_symbol_once): Likewise.
(gcc_convert_symbol): Likewise.
(gcc_symbol_address): Likewise.
(hash_symname): Likewise.
(eq_symname): Likewise.
* compile/compile-c-types.c (hash_type_map_instance): Likewise.
(eq_type_map_instance): Likewise.
(insert_type): Likewise.
(convert_type): Likewise.
* compile/compile-object-load.c (munmap_listp_free_cleanup): Likewise.
(setup_sections): Likewise.
(link_hash_table_free): Likewise.
(copy_sections): Likewise.
* compile/compile-object-run.c (do_module_cleanup): Likewise.
* compile/compile.c (compile_print_value): Likewise.
(do_rmdir): Likewise.
(cleanup_compile_instance): Likewise.
(cleanup_unlink_file): Likewise.
* completer.c (free_completion_tracker): Likewise.
* corelow.c (add_to_spuid_list): Likewise.
* cp-namespace.c (reset_directive_searched): Likewise.
* cp-support.c (reset_directive_searched): Likewise.
* cris-tdep.c (cris_sigtramp_frame_unwind_cache): Likewise.
(cris_frame_unwind_cache): Likewise.
* d-lang.c (builtin_d_type): Likewise.
* d-namespace.c (reset_directive_searched): Likewise.
* dbxread.c (dbx_free_symfile_info): Likewise.
(do_free_bincl_list_cleanup): Likewise.
* disasm.c (hash_dis_line_entry): Likewise.
(eq_dis_line_entry): Likewise.
(dis_asm_print_address): Likewise.
(fprintf_disasm): Likewise.
(do_ui_file_delete): Likewise.
* doublest.c (convert_floatformat_to_doublest): Likewise.
* dummy-frame.c (pop_dummy_frame_bpt): Likewise.
(dummy_frame_prev_register): Likewise.
(dummy_frame_this_id): Likewise.
* dwarf2-frame-tailcall.c (cache_hash): Likewise.
(cache_eq): Likewise.
(cache_find): Likewise.
(tailcall_frame_this_id): Likewise.
(dwarf2_tailcall_prev_register_first): Likewise.
(tailcall_frame_prev_register): Likewise.
(tailcall_frame_dealloc_cache): Likewise.
(tailcall_frame_prev_arch): Likewise.
* dwarf2-frame.c (dwarf2_frame_state_free): Likewise.
(dwarf2_frame_set_init_reg): Likewise.
(dwarf2_frame_init_reg): Likewise.
(dwarf2_frame_set_signal_frame_p): Likewise.
(dwarf2_frame_signal_frame_p): Likewise.
(dwarf2_frame_set_adjust_regnum): Likewise.
(dwarf2_frame_adjust_regnum): Likewise.
(clear_pointer_cleanup): Likewise.
(dwarf2_frame_cache): Likewise.
(find_cie): Likewise.
(dwarf2_frame_find_fde): Likewise.
* dwarf2expr.c (dwarf_expr_address_type): Likewise.
(free_dwarf_expr_context_cleanup): Likewise.
* dwarf2loc.c (locexpr_find_frame_base_location): Likewise.
(locexpr_get_frame_base): Likewise.
(loclist_find_frame_base_location): Likewise.
(loclist_get_frame_base): Likewise.
(dwarf_expr_dwarf_call): Likewise.
(dwarf_expr_get_base_type): Likewise.
(dwarf_expr_push_dwarf_reg_entry_value): Likewise.
(dwarf_expr_get_obj_addr): Likewise.
(entry_data_value_coerce_ref): Likewise.
(entry_data_value_copy_closure): Likewise.
(entry_data_value_free_closure): Likewise.
(get_frame_address_in_block_wrapper): Likewise.
(dwarf2_evaluate_property): Likewise.
(dwarf2_compile_property_to_c): Likewise.
(needs_frame_read_addr_from_reg): Likewise.
(needs_frame_get_reg_value): Likewise.
(needs_frame_frame_base): Likewise.
(needs_frame_frame_cfa): Likewise.
(needs_frame_tls_address): Likewise.
(needs_frame_dwarf_call): Likewise.
(needs_dwarf_reg_entry_value): Likewise.
(get_ax_pc): Likewise.
(locexpr_read_variable): Likewise.
(locexpr_read_variable_at_entry): Likewise.
(locexpr_read_needs_frame): Likewise.
(locexpr_describe_location): Likewise.
(locexpr_tracepoint_var_ref): Likewise.
(locexpr_generate_c_location): Likewise.
(loclist_read_variable): Likewise.
(loclist_read_variable_at_entry): Likewise.
(loclist_describe_location): Likewise.
(loclist_tracepoint_var_ref): Likewise.
(loclist_generate_c_location): Likewise.
* dwarf2read.c (line_header_hash_voidp): Likewise.
(line_header_eq_voidp): Likewise.
(dwarf2_has_info): Likewise.
(dwarf2_get_section_info): Likewise.
(locate_dwz_sections): Likewise.
(hash_file_name_entry): Likewise.
(eq_file_name_entry): Likewise.
(delete_file_name_entry): Likewise.
(dw2_setup): Likewise.
(dw2_get_file_names_reader): Likewise.
(dw2_find_pc_sect_compunit_symtab): Likewise.
(hash_signatured_type): Likewise.
(eq_signatured_type): Likewise.
(add_signatured_type_cu_to_table): Likewise.
(create_debug_types_hash_table): Likewise.
(lookup_dwo_signatured_type): Likewise.
(lookup_dwp_signatured_type): Likewise.
(lookup_signatured_type): Likewise.
(hash_type_unit_group): Likewise.
(eq_type_unit_group): Likewise.
(get_type_unit_group): Likewise.
(process_psymtab_comp_unit_reader): Likewise.
(sort_tu_by_abbrev_offset): Likewise.
(process_skeletonless_type_unit): Likewise.
(psymtabs_addrmap_cleanup): Likewise.
(dwarf2_read_symtab): Likewise.
(psymtab_to_symtab_1): Likewise.
(die_hash): Likewise.
(die_eq): Likewise.
(load_full_comp_unit_reader): Likewise.
(reset_die_in_process): Likewise.
(free_cu_line_header): Likewise.
(handle_DW_AT_stmt_list): Likewise.
(hash_dwo_file): Likewise.
(eq_dwo_file): Likewise.
(hash_dwo_unit): Likewise.
(eq_dwo_unit): Likewise.
(create_dwo_cu_reader): Likewise.
(create_dwo_unit_in_dwp_v1): Likewise.
(create_dwo_unit_in_dwp_v2): Likewise.
(lookup_dwo_unit_in_dwp): Likewise.
(dwarf2_locate_dwo_sections): Likewise.
(dwarf2_locate_common_dwp_sections): Likewise.
(dwarf2_locate_v2_dwp_sections): Likewise.
(hash_dwp_loaded_cutus): Likewise.
(eq_dwp_loaded_cutus): Likewise.
(lookup_dwo_cutu): Likewise.
(abbrev_table_free_cleanup): Likewise.
(dwarf2_free_abbrev_table): Likewise.
(find_partial_die_in_comp_unit): Likewise.
(free_line_header_voidp): Likewise.
(follow_die_offset): Likewise.
(follow_die_sig_1): Likewise.
(free_heap_comp_unit): Likewise.
(free_stack_comp_unit): Likewise.
(dwarf2_free_objfile): Likewise.
(per_cu_offset_and_type_hash): Likewise.
(per_cu_offset_and_type_eq): Likewise.
(get_die_type_at_offset): Likewise.
(partial_die_hash): Likewise.
(partial_die_eq): Likewise.
(dwarf2_per_objfile_free): Likewise.
(hash_strtab_entry): Likewise.
(eq_strtab_entry): Likewise.
(add_string): Likewise.
(hash_symtab_entry): Likewise.
(eq_symtab_entry): Likewise.
(delete_symtab_entry): Likewise.
(cleanup_mapped_symtab): Likewise.
(add_indices_to_cpool): Likewise.
(hash_psymtab_cu_index): Likewise.
(eq_psymtab_cu_index): Likewise.
(add_address_entry_worker): Likewise.
(unlink_if_set): Likewise.
(write_one_signatured_type): Likewise.
(save_gdb_index_command): Likewise.
* elfread.c (elf_symtab_read): Likewise.
(elf_gnu_ifunc_cache_hash): Likewise.
(elf_gnu_ifunc_cache_eq): Likewise.
(elf_gnu_ifunc_record_cache): Likewise.
(elf_gnu_ifunc_resolve_by_cache): Likewise.
(elf_get_probes): Likewise.
(probe_key_free): Likewise.
* f-lang.c (builtin_f_type): Likewise.
* frame-base.c (frame_base_append_sniffer): Likewise.
(frame_base_set_default): Likewise.
(frame_base_find_by_frame): Likewise.
* frame-unwind.c (frame_unwind_prepend_unwinder): Likewise.
(frame_unwind_append_unwinder): Likewise.
(frame_unwind_find_by_frame): Likewise.
* frame.c (frame_addr_hash): Likewise.
(frame_addr_hash_eq): Likewise.
(frame_stash_find): Likewise.
(do_frame_register_read): Likewise.
(unwind_to_current_frame): Likewise.
(frame_cleanup_after_sniffer): Likewise.
* frv-linux-tdep.c (frv_linux_sigtramp_frame_cache): Likewise.
* frv-tdep.c (frv_frame_unwind_cache): Likewise.
* ft32-tdep.c (ft32_frame_cache): Likewise.
* gcore.c (do_bfd_delete_cleanup): Likewise.
(gcore_create_callback): Likewise.
* gdb_bfd.c (hash_bfd): Likewise.
(eq_bfd): Likewise.
(gdb_bfd_open): Likewise.
(free_one_bfd_section): Likewise.
(gdb_bfd_ref): Likewise.
(gdb_bfd_unref): Likewise.
(get_section_descriptor): Likewise.
(gdb_bfd_map_section): Likewise.
(gdb_bfd_crc): Likewise.
(gdb_bfd_mark_parent): Likewise.
(gdb_bfd_record_inclusion): Likewise.
(gdb_bfd_requires_relocations): Likewise.
(print_one_bfd): Likewise.
* gdbtypes.c (type_pair_hash): Likewise.
(type_pair_eq): Likewise.
(builtin_type): Likewise.
(objfile_type): Likewise.
* gnu-v3-abi.c (vtable_ptrdiff_type): Likewise.
(vtable_address_point_offset): Likewise.
(gnuv3_get_vtable): Likewise.
(hash_value_and_voffset): Likewise.
(eq_value_and_voffset): Likewise.
(compare_value_and_voffset): Likewise.
(compute_vtable_size): Likewise.
(gnuv3_get_typeid_type): Likewise.
* go-lang.c (builtin_go_type): Likewise.
* guile/scm-block.c (bkscm_hash_block_smob): Likewise.
(bkscm_eq_block_smob): Likewise.
(bkscm_objfile_block_map): Likewise.
(bkscm_del_objfile_blocks): Likewise.
* guile/scm-breakpoint.c (bpscm_build_bp_list): Likewise.
* guile/scm-disasm.c (gdbscm_disasm_read_memory_worker): Likewise.
(gdbscm_disasm_print_address): Likewise.
* guile/scm-frame.c (frscm_hash_frame_smob): Likewise.
(frscm_eq_frame_smob): Likewise.
(frscm_inferior_frame_map): Likewise.
(frscm_del_inferior_frames): Likewise.
* guile/scm-gsmob.c (gdbscm_add_objfile_ref): Likewise.
* guile/scm-objfile.c (ofscm_handle_objfile_deleted): Likewise.
(ofscm_objfile_smob_from_objfile): Likewise.
* guile/scm-ports.c (ioscm_write): Likewise.
(ioscm_file_port_delete): Likewise.
(ioscm_file_port_rewind): Likewise.
(ioscm_file_port_put): Likewise.
(ioscm_file_port_write): Likewise.
* guile/scm-progspace.c (psscm_handle_pspace_deleted): Likewise.
(psscm_pspace_smob_from_pspace): Likewise.
* guile/scm-safe-call.c (scscm_recording_pre_unwind_handler): Likewise.
(scscm_recording_unwind_handler): Likewise.
(gdbscm_with_catch): Likewise.
(scscm_call_0_body): Likewise.
(scscm_call_1_body): Likewise.
(scscm_call_2_body): Likewise.
(scscm_call_3_body): Likewise.
(scscm_call_4_body): Likewise.
(scscm_apply_1_body): Likewise.
(scscm_eval_scheme_string): Likewise.
(gdbscm_safe_eval_string): Likewise.
(scscm_source_scheme_script): Likewise.
(gdbscm_safe_source_script): Likewise.
* guile/scm-string.c (gdbscm_call_scm_to_stringn): Likewise.
(gdbscm_call_scm_from_stringn): Likewise.
* guile/scm-symbol.c (syscm_hash_symbol_smob): Likewise.
(syscm_eq_symbol_smob): Likewise.
(syscm_get_symbol_map): Likewise.
(syscm_del_objfile_symbols): Likewise.
* guile/scm-symtab.c (stscm_hash_symtab_smob): Likewise.
(stscm_eq_symtab_smob): Likewise.
(stscm_objfile_symtab_map): Likewise.
(stscm_del_objfile_symtabs): Likewise.
* guile/scm-type.c (tyscm_hash_type_smob): Likewise.
(tyscm_eq_type_smob): Likewise.
(tyscm_type_map): Likewise.
(tyscm_copy_type_recursive): Likewise.
(save_objfile_types): Likewise.
* guile/scm-utils.c (extract_arg): Likewise.
* h8300-tdep.c (h8300_frame_cache): Likewise.
* hppa-linux-tdep.c (hppa_linux_sigtramp_frame_unwind_cache): Likewise.
* hppa-tdep.c (compare_unwind_entries): Likewise.
(find_unwind_entry): Likewise.
(hppa_frame_cache): Likewise.
(hppa_stub_frame_unwind_cache): Likewise.
* hppanbsd-tdep.c (hppanbsd_supply_gregset): Likewise.
* hppaobsd-tdep.c (hppaobsd_supply_gregset): Likewise.
(hppaobsd_supply_fpregset): Likewise.
* i386-cygwin-tdep.c (core_process_module_section): Likewise.
* i386-linux-tdep.c (i386_linux_init_abi): Likewise.
* i386-tdep.c (i386_frame_cache): Likewise.
(i386_epilogue_frame_cache): Likewise.
(i386_sigtramp_frame_cache): Likewise.
(i386_supply_gregset): Likewise.
(i386_collect_gregset): Likewise.
(i386_gdbarch_init): Likewise.
* i386obsd-tdep.c (i386obsd_aout_supply_regset): Likewise.
(i386obsd_trapframe_cache): Likewise.
* i387-tdep.c (i387_supply_fsave): Likewise.
(i387_collect_fsave): Likewise.
(i387_supply_fxsave): Likewise.
(i387_collect_fxsave): Likewise.
(i387_supply_xsave): Likewise.
(i387_collect_xsave): Likewise.
* ia64-tdep.c (ia64_frame_cache): Likewise.
(ia64_sigtramp_frame_cache): Likewise.
* infcmd.c (attach_command_continuation): Likewise.
(attach_command_continuation_free_args): Likewise.
* inferior.c (restore_inferior): Likewise.
(delete_thread_of_inferior): Likewise.
* inflow.c (inflow_inferior_data_cleanup): Likewise.
(get_inflow_inferior_data): Likewise.
(inflow_inferior_exit): Likewise.
* infrun.c (displaced_step_clear_cleanup): Likewise.
(restore_current_uiout_cleanup): Likewise.
(release_stop_context_cleanup): Likewise.
(do_restore_infcall_suspend_state_cleanup): Likewise.
(do_restore_infcall_control_state_cleanup): Likewise.
(restore_inferior_ptid): Likewise.
* inline-frame.c (block_starting_point_at): Likewise.
* iq2000-tdep.c (iq2000_frame_cache): Likewise.
* jit.c (get_jit_objfile_data): Likewise.
(get_jit_program_space_data): Likewise.
(jit_object_close_impl): Likewise.
(jit_find_objf_with_entry_addr): Likewise.
(jit_breakpoint_deleted): Likewise.
(jit_unwind_reg_set_impl): Likewise.
(jit_unwind_reg_get_impl): Likewise.
(jit_dealloc_cache): Likewise.
(jit_frame_sniffer): Likewise.
(jit_frame_prev_register): Likewise.
(jit_prepend_unwinder): Likewise.
(jit_inferior_exit_hook): Likewise.
(free_objfile_data): Likewise.
* jv-lang.c (jv_per_objfile_free): Likewise.
(get_dynamics_objfile): Likewise.
(get_java_class_symtab): Likewise.
(builtin_java_type): Likewise.
* language.c (language_string_char_type): Likewise.
(language_bool_type): Likewise.
(language_lookup_primitive_type): Likewise.
(language_lookup_primitive_type_as_symbol): Likewise.
* linespec.c (hash_address_entry): Likewise.
(eq_address_entry): Likewise.
(iterate_inline_only): Likewise.
(iterate_name_matcher): Likewise.
(decode_line_2_compare_items): Likewise.
(collect_one_symbol): Likewise.
(compare_symbols): Likewise.
(compare_msymbols): Likewise.
(add_symtabs_to_list): Likewise.
(collect_symbols): Likewise.
(compare_msyms): Likewise.
(add_minsym): Likewise.
(cleanup_linespec_result): Likewise.
* linux-fork.c (inferior_call_waitpid_cleanup): Likewise.
* linux-nat.c (delete_lwp_cleanup): Likewise.
(count_events_callback): Likewise.
(select_event_lwp_callback): Likewise.
(resume_stopped_resumed_lwps): Likewise.
* linux-tdep.c (get_linux_gdbarch_data): Likewise.
(invalidate_linux_cache_inf): Likewise.
(get_linux_inferior_data): Likewise.
(linux_find_memory_regions_thunk): Likewise.
(linux_make_mappings_callback): Likewise.
(linux_corefile_thread_callback): Likewise.
(find_mapping_size): Likewise.
* linux-thread-db.c (find_new_threads_callback): Likewise.
* lm32-tdep.c (lm32_frame_cache): Likewise.
* m2-lang.c (builtin_m2_type): Likewise.
* m32c-tdep.c (m32c_analyze_frame_prologue): Likewise.
* m32r-linux-tdep.c (m32r_linux_sigtramp_frame_cache): Likewise.
(m32r_linux_supply_gregset): Likewise.
(m32r_linux_collect_gregset): Likewise.
* m32r-tdep.c (m32r_frame_unwind_cache): Likewise.
* m68hc11-tdep.c (m68hc11_frame_unwind_cache): Likewise.
* m68k-tdep.c (m68k_frame_cache): Likewise.
* m68kbsd-tdep.c (m68kbsd_supply_fpregset): Likewise.
(m68kbsd_supply_gregset): Likewise.
* m68klinux-tdep.c (m68k_linux_sigtramp_frame_cache): Likewise.
* m88k-tdep.c (m88k_frame_cache): Likewise.
(m88k_supply_gregset): Likewise.
gdb/gdbserver/ChangeLog:
* dll.c (match_dll): Add cast(s).
(unloaded_dll): Likewise.
* linux-low.c (second_thread_of_pid_p): Likewise.
(delete_lwp_callback): Likewise.
(count_events_callback): Likewise.
(select_event_lwp_callback): Likewise.
(linux_set_resume_request): Likewise.
* server.c (accumulate_file_name_length): Likewise.
(emit_dll_description): Likewise.
(handle_qxfer_threads_worker): Likewise.
(visit_actioned_threads): Likewise.
* thread-db.c (any_thread_of): Likewise.
* tracepoint.c (same_process_p): Likewise.
(match_blocktype): Likewise.
(build_traceframe_info_xml): Likewise.
gdb/testsuite/ChangeLog:
* gdb.gdb/selftest.exp (do_steps_and_nexts): Adjust expected
source line.
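The entries above all make the same mechanical change: a value of type void * now gets an explicit cast before it is assigned to a typed pointer, which C++ requires where C allowed the implicit conversion. A minimal sketch of the pattern, using a hypothetical callback name (the real instances are the functions listed above, e.g. the user_data cast in the XML handler that follows):

/* Hypothetical callback used only for illustration; the real instances
   are the functions listed in the ChangeLog above.  */
struct btrace_data;

static void
example_callback (void *user_data)
{
  /* Valid C, but rejected by a C++ compiler:
       struct btrace_data *btrace = user_data;  */
  struct btrace_data *btrace = (struct btrace_data *) user_data;

  (void) btrace;	/* Use the typed pointer from here on.  */
}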
  btrace = (struct btrace_data *) user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = new std::vector<btrace_block>;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value.get ();
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value.get ();

  btrace->variant.bts.blocks->emplace_back (*begin, *end);
}
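The fragment above is the tail of the BTS <block> element handler: each <block begin="..." end="..."/> element contributes one begin/end address pair, and the blocks vector is created the first time a block arrives for an empty (BTRACE_FORMAT_NONE) container. A self-contained sketch of that accumulation, using stand-in demo_* types rather than the real gdbsupport/btrace-common.h definitions:

#include <cstdint>
#include <vector>

/* Stand-in for btrace_block; only the begin/end pair is modelled.  */
struct demo_block
{
  std::uint64_t begin;
  std::uint64_t end;
};

/* Append one decoded block, creating the vector lazily just as the
   handler above does when the first block of a reply arrives.  */
static void
demo_add_block (std::vector<demo_block> *&blocks,
		std::uint64_t begin, std::uint64_t end)
{
  if (blocks == nullptr)
    blocks = new std::vector<demo_block> ();

  blocks->push_back ({ begin, end });
}

int
main ()
{
  std::vector<demo_block> *blocks = nullptr;

  /* Corresponds to e.g. <block begin="0x400500" end="0x400520"/>.  */
  demo_add_block (blocks, 0x400500, 0x400520);

  delete blocks;
  return 0;
}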
btrace: support Intel(R) Processor Trace
Adds a new command "record btrace pt" to configure the kernel to use
Intel(R) Processor Trace instead of Branch Trace Store.
The "record btrace" command chooses the tracing format automatically.
Intel(R) Processor Trace support requires Linux 4.1 and libipt.
gdb/
* NEWS: Announce new commands "record btrace pt" and "record pt".
Announce new options "set|show record btrace pt buffer-size".
* btrace.c: Include "rsp-low.h".
Include "inttypes.h".
(btrace_add_pc): Add forward declaration.
(pt_reclassify_insn, ftrace_add_pt, btrace_pt_readmem_callback)
(pt_translate_cpu_vendor, btrace_finalize_ftrace_pt)
(btrace_compute_ftrace_pt): New.
(btrace_compute_ftrace): Support BTRACE_FORMAT_PT.
(check_xml_btrace_version): Update version check.
(parse_xml_raw, parse_xml_btrace_pt_config_cpu)
(parse_xml_btrace_pt_raw, parse_xml_btrace_pt)
(btrace_pt_config_cpu_attributes, btrace_pt_config_children)
(btrace_pt_children): New.
(btrace_children): Add support for "pt".
(parse_xml_btrace_conf_pt, btrace_conf_pt_attributes): New.
(btrace_conf_children): Add support for "pt".
* btrace.h: Include "intel-pt.h".
(btrace_pt_error): New.
* common/btrace-common.c (btrace_format_string, btrace_data_fini)
(btrace_data_empty): Support BTRACE_FORMAT_PT.
* common/btrace-common.h (btrace_format): Add BTRACE_FORMAT_PT.
(struct btrace_config_pt): New.
(struct btrace_config)<pt>: New.
(struct btrace_data_pt_config, struct btrace_data_pt): New.
(struct btrace_data)<pt>: New.
* features/btrace-conf.dtd (btrace-conf)<pt>: New.
(pt): New.
* features/btrace.dtd (btrace)<pt>: New.
(pt, pt-config, cpu): New.
* nat/linux-btrace.c (perf_event_read, perf_event_read_all)
(perf_event_pt_event_type, kernel_supports_pt)
(linux_supports_pt): New.
(linux_supports_btrace): Support BTRACE_FORMAT_PT.
(linux_enable_bts): Free tinfo on error.
(linux_enable_pt): New.
(linux_enable_btrace): Support BTRACE_FORMAT_PT.
(linux_disable_pt): New.
(linux_disable_btrace): Support BTRACE_FORMAT_PT.
(linux_fill_btrace_pt_config, linux_read_pt): New.
(linux_read_btrace): Support BTRACE_FORMAT_PT.
* nat/linux-btrace.h (struct btrace_tinfo_pt): New.
(struct btrace_target_info)<pt>: New.
* record-btrace.c (set_record_btrace_pt_cmdlist)
(show_record_btrace_pt_cmdlist): New.
(record_btrace_print_pt_conf): New.
(record_btrace_print_conf): Support BTRACE_FORMAT_PT.
(btrace_ui_out_decode_error): Support BTRACE_FORMAT_PT.
(cmd_record_btrace_pt_start): New.
(cmd_record_btrace_start): Support BTRACE_FORMAT_PT.
(cmd_set_record_btrace_pt, cmd_show_record_btrace_pt): New.
(_initialize_record_btrace): Add new commands.
* remote.c (PACKET_Qbtrace_pt, PACKET_Qbtrace_conf_pt_size): New.
(remote_protocol_features): Add "Qbtrace:pt".
Add "Qbtrace-conf:pt:size".
(remote_supports_btrace): Support BTRACE_FORMAT_PT.
(btrace_sync_conf): Support PACKET_Qbtrace_conf_pt_size.
(remote_enable_btrace): Support BTRACE_FORMAT_PT.
(_initialize_remote): Add new commands.
gdbserver/
* linux-low.c: Include "rsp-low.h"
(linux_low_encode_pt_config, linux_low_encode_raw): New.
(linux_low_read_btrace): Support BTRACE_FORMAT_PT.
(linux_low_btrace_conf): Support BTRACE_FORMAT_PT.
(handle_btrace_enable_pt): New.
(handle_btrace_general_set): Support "pt".
(handle_btrace_conf_general_set): Support "pt:size".
doc/
* gdb.texinfo (Process Record and Replay): Spell out that variables
and registers are not available during btrace replay.
Describe the new "record btrace pt" command.
Describe the new "set|show record btrace pt buffer-size" options.
(General Query Packets): Describe the new Qbtrace:pt and
Qbtrace-conf:pt:size packets.
Expand "bts" to "Branch Trace Store".
Update the branch trace DTD.
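Many of the functions listed above that gain "Support BTRACE_FORMAT_PT" follow one pattern: wherever the code already switches over enum btrace_format to handle BTS, a PT case is added next to it. A hedged sketch of that shape, with a stand-in enum and placeholder strings rather than the real btrace_format_string implementation:

/* Stand-in for the enum extended in common/btrace-common.h; the real
   type is enum btrace_format.  */
enum demo_btrace_format
{
  BTRACE_FORMAT_NONE,
  BTRACE_FORMAT_BTS,
  BTRACE_FORMAT_PT
};

/* Placeholder bodies only; the real per-format logic lives in the
   functions listed above.  */
static const char *
demo_format_name (enum demo_btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return "none";

    case BTRACE_FORMAT_BTS:
      return "Branch Trace Store";

    case BTRACE_FORMAT_PT:
      return "Intel Processor Trace";
    }

  return "unknown";
}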
/* Parse a "raw" xml record.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
	       gdb_byte **pdata, size_t *psize)
{
  gdb_byte *bin;
  size_t len, size;

  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  gdb::unique_xmalloc_ptr<gdb_byte> data ((gdb_byte *) xmalloc (size));
  bin = data.get ();

  /* We use hex encoding - see gdbsupport/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
	gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  *pdata = data.release ();
  *psize = size;
}
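parse_xml_raw above depends on the hex helpers from gdbsupport/rsp-low.h: each byte of the raw trace payload is transmitted as two ASCII hex digits, and fromhex maps one digit to its 4-bit value. A self-contained sketch of the same decoding, using hypothetical demo_* names instead of the real helpers:

#include <cstddef>
#include <cstring>
#include <vector>

/* Simplified stand-in for fromhex from gdbsupport/rsp-low.h.  */
static int
demo_fromhex (char ch)
{
  if (ch >= '0' && ch <= '9')
    return ch - '0';
  if (ch >= 'a' && ch <= 'f')
    return ch - 'a' + 10;
  if (ch >= 'A' && ch <= 'F')
    return ch - 'A' + 10;
  return -1;
}

/* Mirror the decoding loop above: "0fae" becomes { 0x0f, 0xae }.  */
static std::vector<unsigned char>
demo_unhexify (const char *text)
{
  std::vector<unsigned char> bin;

  for (std::size_t len = std::strlen (text); len > 1; len -= 2)
    {
      char hi = *text++;
      char lo = *text++;

      bin.push_back ((unsigned char) (demo_fromhex (hi) * 16
				      + demo_fromhex (lo)));
    }

  return bin;
}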

/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
				const struct gdb_xml_element *element,
				void *user_data,
				std::vector<gdb_xml_value> &attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor =
    (const char *) xml_find_attribute (attributes, "vendor")->value.get ();
  family
    = (ULONGEST *) xml_find_attribute (attributes, "family")->value.get ();
  model
    = (ULONGEST *) xml_find_attribute (attributes, "model")->value.get ();
  stepping
    = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value.get ();
Add some more casts (1/2)
Note: I needed to split this patch in two, otherwise it's too big for
the mailing list.
This patch adds explicit casts to situations where a void pointer is
assigned to a pointer to the "real" type. Building in C++ mode requires
those assignments to use an explicit cast. This includes, for example:
- callback arguments (cleanups, comparison functions, ...)
- data attached to some object (objfile, program space, etc) in the form
of a void pointer
- "user data" passed to some function
This patch comes from the commit "(mostly) auto-generated patch to insert
casts needed for C++", taken from Pedro's C++ branch.
Only files built on x86 with --enable-targets=all are modified, so the
native files for other arches will need to be dealt with separately.
I built-tested this with --enable-targets=all and reg-tested. To my
surprise, a test case (selftest.exp) had to be adjusted.
Here's the ChangeLog entry. Again, this was relatively quick to make
despite the length, thanks to David Malcom's script, although I don't
believe it's very useful information in that particular case...
gdb/ChangeLog:
* aarch64-tdep.c (aarch64_make_prologue_cache): Add cast(s).
(aarch64_make_stub_cache): Likewise.
(value_of_aarch64_user_reg): Likewise.
* ada-lang.c (ada_inferior_data_cleanup): Likewise.
(get_ada_inferior_data): Likewise.
(get_ada_pspace_data): Likewise.
(ada_pspace_data_cleanup): Likewise.
(ada_complete_symbol_matcher): Likewise.
(ada_exc_search_name_matches): Likewise.
* ada-tasks.c (get_ada_tasks_pspace_data): Likewise.
(get_ada_tasks_inferior_data): Likewise.
* addrmap.c (addrmap_mutable_foreach_worker): Likewise.
(splay_obstack_alloc): Likewise.
(splay_obstack_free): Likewise.
* alpha-linux-tdep.c (alpha_linux_supply_gregset): Likewise.
(alpha_linux_collect_gregset): Likewise.
(alpha_linux_supply_fpregset): Likewise.
(alpha_linux_collect_fpregset): Likewise.
* alpha-mdebug-tdep.c (alpha_mdebug_frame_unwind_cache): Likewise.
* alpha-tdep.c (alpha_lds): Likewise.
(alpha_sts): Likewise.
(alpha_sigtramp_frame_unwind_cache): Likewise.
(alpha_heuristic_frame_unwind_cache): Likewise.
(alpha_supply_int_regs): Likewise.
(alpha_fill_int_regs): Likewise.
(alpha_supply_fp_regs): Likewise.
(alpha_fill_fp_regs): Likewise.
* alphanbsd-tdep.c (alphanbsd_supply_fpregset): Likewise.
(alphanbsd_aout_supply_gregset): Likewise.
(alphanbsd_supply_gregset): Likewise.
* amd64-linux-tdep.c (amd64_linux_init_abi): Likewise.
(amd64_x32_linux_init_abi): Likewise.
* amd64-nat.c (amd64_supply_native_gregset): Likewise.
(amd64_collect_native_gregset): Likewise.
* amd64-tdep.c (amd64_frame_cache): Likewise.
(amd64_sigtramp_frame_cache): Likewise.
(amd64_epilogue_frame_cache): Likewise.
(amd64_supply_fxsave): Likewise.
(amd64_supply_xsave): Likewise.
(amd64_collect_fxsave): Likewise.
(amd64_collect_xsave): Likewise.
* amd64-windows-tdep.c (amd64_windows_frame_cache): Likewise.
* amd64obsd-tdep.c (amd64obsd_trapframe_cache): Likewise.
* arm-linux-tdep.c (arm_linux_supply_gregset): Likewise.
(arm_linux_collect_gregset): Likewise.
(arm_linux_supply_nwfpe): Likewise.
(arm_linux_collect_nwfpe): Likewise.
(arm_linux_supply_vfp): Likewise.
(arm_linux_collect_vfp): Likewise.
* arm-tdep.c (arm_find_mapping_symbol): Likewise.
(arm_prologue_unwind_stop_reason): Likewise.
(arm_prologue_this_id): Likewise.
(arm_prologue_prev_register): Likewise.
(arm_exidx_data_free): Likewise.
(arm_find_exidx_entry): Likewise.
(arm_stub_this_id): Likewise.
(arm_m_exception_this_id): Likewise.
(arm_m_exception_prev_register): Likewise.
(arm_normal_frame_base): Likewise.
(gdb_print_insn_arm): Likewise.
(arm_objfile_data_free): Likewise.
(arm_record_special_symbol): Likewise.
(value_of_arm_user_reg): Likewise.
* armbsd-tdep.c (armbsd_supply_fpregset): Likewise.
(armbsd_supply_gregset): Likewise.
* auto-load.c (auto_load_pspace_data_cleanup): Likewise.
(get_auto_load_pspace_data): Likewise.
(hash_loaded_script_entry): Likewise.
(eq_loaded_script_entry): Likewise.
(clear_section_scripts): Likewise.
(collect_matching_scripts): Likewise.
* auxv.c (auxv_inferior_data_cleanup): Likewise.
(get_auxv_inferior_data): Likewise.
* avr-tdep.c (avr_frame_unwind_cache): Likewise.
* ax-general.c (do_free_agent_expr_cleanup): Likewise.
* bfd-target.c (target_bfd_xfer_partial): Likewise.
(target_bfd_xclose): Likewise.
(target_bfd_get_section_table): Likewise.
* bfin-tdep.c (bfin_frame_cache): Likewise.
* block.c (find_block_in_blockvector): Likewise.
(call_site_for_pc): Likewise.
(block_find_non_opaque_type_preferred): Likewise.
* break-catch-sig.c (signal_catchpoint_insert_location): Likewise.
(signal_catchpoint_remove_location): Likewise.
(signal_catchpoint_breakpoint_hit): Likewise.
(signal_catchpoint_print_one): Likewise.
(signal_catchpoint_print_mention): Likewise.
(signal_catchpoint_print_recreate): Likewise.
* break-catch-syscall.c (get_catch_syscall_inferior_data): Likewise.
* breakpoint.c (do_cleanup_counted_command_line): Likewise.
(bp_location_compare_addrs): Likewise.
(get_first_locp_gte_addr): Likewise.
(check_tracepoint_command): Likewise.
(do_map_commands_command): Likewise.
(get_breakpoint_objfile_data): Likewise.
(free_breakpoint_probes): Likewise.
(do_captured_breakpoint_query): Likewise.
(compare_breakpoints): Likewise.
(bp_location_compare): Likewise.
(bpstat_remove_breakpoint_callback): Likewise.
(do_delete_breakpoint_cleanup): Likewise.
* bsd-uthread.c (bsd_uthread_set_supply_uthread): Likewise.
(bsd_uthread_set_collect_uthread): Likewise.
(bsd_uthread_activate): Likewise.
(bsd_uthread_fetch_registers): Likewise.
(bsd_uthread_store_registers): Likewise.
* btrace.c (check_xml_btrace_version): Likewise.
(parse_xml_btrace_block): Likewise.
(parse_xml_btrace_pt_config_cpu): Likewise.
(parse_xml_btrace_pt_raw): Likewise.
(parse_xml_btrace_pt): Likewise.
(parse_xml_btrace_conf_bts): Likewise.
(parse_xml_btrace_conf_pt): Likewise.
(do_btrace_data_cleanup): Likewise.
* c-typeprint.c (find_typedef_for_canonicalize): Likewise.
* charset.c (cleanup_iconv): Likewise.
(do_cleanup_iterator): Likewise.
* cli-out.c (cli_uiout_dtor): Likewise.
(cli_table_begin): Likewise.
(cli_table_body): Likewise.
(cli_table_end): Likewise.
(cli_table_header): Likewise.
(cli_begin): Likewise.
(cli_end): Likewise.
(cli_field_int): Likewise.
(cli_field_skip): Likewise.
(cli_field_string): Likewise.
(cli_field_fmt): Likewise.
(cli_spaces): Likewise.
(cli_text): Likewise.
(cli_message): Likewise.
(cli_wrap_hint): Likewise.
(cli_flush): Likewise.
(cli_redirect): Likewise.
(out_field_fmt): Likewise.
(field_separator): Likewise.
(cli_out_set_stream): Likewise.
* cli/cli-cmds.c (compare_symtabs): Likewise.
* cli/cli-dump.c (call_dump_func): Likewise.
(restore_section_callback): Likewise.
* cli/cli-script.c (clear_hook_in_cleanup): Likewise.
(do_restore_user_call_depth): Likewise.
(do_free_command_lines_cleanup): Likewise.
* coff-pe-read.c (get_section_vmas): Likewise.
(pe_as16): Likewise.
(pe_as32): Likewise.
* coffread.c (coff_symfile_read): Likewise.
* common/agent.c (agent_look_up_symbols): Likewise.
* common/filestuff.c (do_close_cleanup): Likewise.
* common/format.c (free_format_pieces_cleanup): Likewise.
* common/vec.c (vec_o_reserve): Likewise.
* compile/compile-c-support.c (print_one_macro): Likewise.
* compile/compile-c-symbols.c (hash_symbol_error): Likewise.
(eq_symbol_error): Likewise.
(del_symbol_error): Likewise.
(error_symbol_once): Likewise.
(gcc_convert_symbol): Likewise.
(gcc_symbol_address): Likewise.
(hash_symname): Likewise.
(eq_symname): Likewise.
* compile/compile-c-types.c (hash_type_map_instance): Likewise.
(eq_type_map_instance): Likewise.
(insert_type): Likewise.
(convert_type): Likewise.
* compile/compile-object-load.c (munmap_listp_free_cleanup): Likewise.
(setup_sections): Likewise.
(link_hash_table_free): Likewise.
(copy_sections): Likewise.
* compile/compile-object-run.c (do_module_cleanup): Likewise.
* compile/compile.c (compile_print_value): Likewise.
(do_rmdir): Likewise.
(cleanup_compile_instance): Likewise.
(cleanup_unlink_file): Likewise.
* completer.c (free_completion_tracker): Likewise.
* corelow.c (add_to_spuid_list): Likewise.
* cp-namespace.c (reset_directive_searched): Likewise.
* cp-support.c (reset_directive_searched): Likewise.
* cris-tdep.c (cris_sigtramp_frame_unwind_cache): Likewise.
(cris_frame_unwind_cache): Likewise.
* d-lang.c (builtin_d_type): Likewise.
* d-namespace.c (reset_directive_searched): Likewise.
* dbxread.c (dbx_free_symfile_info): Likewise.
(do_free_bincl_list_cleanup): Likewise.
* disasm.c (hash_dis_line_entry): Likewise.
(eq_dis_line_entry): Likewise.
(dis_asm_print_address): Likewise.
(fprintf_disasm): Likewise.
(do_ui_file_delete): Likewise.
* doublest.c (convert_floatformat_to_doublest): Likewise.
* dummy-frame.c (pop_dummy_frame_bpt): Likewise.
(dummy_frame_prev_register): Likewise.
(dummy_frame_this_id): Likewise.
* dwarf2-frame-tailcall.c (cache_hash): Likewise.
(cache_eq): Likewise.
(cache_find): Likewise.
(tailcall_frame_this_id): Likewise.
(dwarf2_tailcall_prev_register_first): Likewise.
(tailcall_frame_prev_register): Likewise.
(tailcall_frame_dealloc_cache): Likewise.
(tailcall_frame_prev_arch): Likewise.
* dwarf2-frame.c (dwarf2_frame_state_free): Likewise.
(dwarf2_frame_set_init_reg): Likewise.
(dwarf2_frame_init_reg): Likewise.
(dwarf2_frame_set_signal_frame_p): Likewise.
(dwarf2_frame_signal_frame_p): Likewise.
(dwarf2_frame_set_adjust_regnum): Likewise.
(dwarf2_frame_adjust_regnum): Likewise.
(clear_pointer_cleanup): Likewise.
(dwarf2_frame_cache): Likewise.
(find_cie): Likewise.
(dwarf2_frame_find_fde): Likewise.
* dwarf2expr.c (dwarf_expr_address_type): Likewise.
(free_dwarf_expr_context_cleanup): Likewise.
* dwarf2loc.c (locexpr_find_frame_base_location): Likewise.
(locexpr_get_frame_base): Likewise.
(loclist_find_frame_base_location): Likewise.
(loclist_get_frame_base): Likewise.
(dwarf_expr_dwarf_call): Likewise.
(dwarf_expr_get_base_type): Likewise.
(dwarf_expr_push_dwarf_reg_entry_value): Likewise.
(dwarf_expr_get_obj_addr): Likewise.
(entry_data_value_coerce_ref): Likewise.
(entry_data_value_copy_closure): Likewise.
(entry_data_value_free_closure): Likewise.
(get_frame_address_in_block_wrapper): Likewise.
(dwarf2_evaluate_property): Likewise.
(dwarf2_compile_property_to_c): Likewise.
(needs_frame_read_addr_from_reg): Likewise.
(needs_frame_get_reg_value): Likewise.
(needs_frame_frame_base): Likewise.
(needs_frame_frame_cfa): Likewise.
(needs_frame_tls_address): Likewise.
(needs_frame_dwarf_call): Likewise.
(needs_dwarf_reg_entry_value): Likewise.
(get_ax_pc): Likewise.
(locexpr_read_variable): Likewise.
(locexpr_read_variable_at_entry): Likewise.
(locexpr_read_needs_frame): Likewise.
(locexpr_describe_location): Likewise.
(locexpr_tracepoint_var_ref): Likewise.
(locexpr_generate_c_location): Likewise.
(loclist_read_variable): Likewise.
(loclist_read_variable_at_entry): Likewise.
(loclist_describe_location): Likewise.
(loclist_tracepoint_var_ref): Likewise.
(loclist_generate_c_location): Likewise.
* dwarf2read.c (line_header_hash_voidp): Likewise.
(line_header_eq_voidp): Likewise.
(dwarf2_has_info): Likewise.
(dwarf2_get_section_info): Likewise.
(locate_dwz_sections): Likewise.
(hash_file_name_entry): Likewise.
(eq_file_name_entry): Likewise.
(delete_file_name_entry): Likewise.
(dw2_setup): Likewise.
(dw2_get_file_names_reader): Likewise.
(dw2_find_pc_sect_compunit_symtab): Likewise.
(hash_signatured_type): Likewise.
(eq_signatured_type): Likewise.
(add_signatured_type_cu_to_table): Likewise.
(create_debug_types_hash_table): Likewise.
(lookup_dwo_signatured_type): Likewise.
(lookup_dwp_signatured_type): Likewise.
(lookup_signatured_type): Likewise.
(hash_type_unit_group): Likewise.
(eq_type_unit_group): Likewise.
(get_type_unit_group): Likewise.
(process_psymtab_comp_unit_reader): Likewise.
(sort_tu_by_abbrev_offset): Likewise.
(process_skeletonless_type_unit): Likewise.
(psymtabs_addrmap_cleanup): Likewise.
(dwarf2_read_symtab): Likewise.
(psymtab_to_symtab_1): Likewise.
(die_hash): Likewise.
(die_eq): Likewise.
(load_full_comp_unit_reader): Likewise.
(reset_die_in_process): Likewise.
(free_cu_line_header): Likewise.
(handle_DW_AT_stmt_list): Likewise.
(hash_dwo_file): Likewise.
(eq_dwo_file): Likewise.
(hash_dwo_unit): Likewise.
(eq_dwo_unit): Likewise.
(create_dwo_cu_reader): Likewise.
(create_dwo_unit_in_dwp_v1): Likewise.
(create_dwo_unit_in_dwp_v2): Likewise.
(lookup_dwo_unit_in_dwp): Likewise.
(dwarf2_locate_dwo_sections): Likewise.
(dwarf2_locate_common_dwp_sections): Likewise.
(dwarf2_locate_v2_dwp_sections): Likewise.
(hash_dwp_loaded_cutus): Likewise.
(eq_dwp_loaded_cutus): Likewise.
(lookup_dwo_cutu): Likewise.
(abbrev_table_free_cleanup): Likewise.
(dwarf2_free_abbrev_table): Likewise.
(find_partial_die_in_comp_unit): Likewise.
(free_line_header_voidp): Likewise.
(follow_die_offset): Likewise.
(follow_die_sig_1): Likewise.
(free_heap_comp_unit): Likewise.
(free_stack_comp_unit): Likewise.
(dwarf2_free_objfile): Likewise.
(per_cu_offset_and_type_hash): Likewise.
(per_cu_offset_and_type_eq): Likewise.
(get_die_type_at_offset): Likewise.
(partial_die_hash): Likewise.
(partial_die_eq): Likewise.
(dwarf2_per_objfile_free): Likewise.
(hash_strtab_entry): Likewise.
(eq_strtab_entry): Likewise.
(add_string): Likewise.
(hash_symtab_entry): Likewise.
(eq_symtab_entry): Likewise.
(delete_symtab_entry): Likewise.
(cleanup_mapped_symtab): Likewise.
(add_indices_to_cpool): Likewise.
(hash_psymtab_cu_index): Likewise.
(eq_psymtab_cu_index): Likewise.
(add_address_entry_worker): Likewise.
(unlink_if_set): Likewise.
(write_one_signatured_type): Likewise.
(save_gdb_index_command): Likewise.
* elfread.c (elf_symtab_read): Likewise.
(elf_gnu_ifunc_cache_hash): Likewise.
(elf_gnu_ifunc_cache_eq): Likewise.
(elf_gnu_ifunc_record_cache): Likewise.
(elf_gnu_ifunc_resolve_by_cache): Likewise.
(elf_get_probes): Likewise.
(probe_key_free): Likewise.
* f-lang.c (builtin_f_type): Likewise.
* frame-base.c (frame_base_append_sniffer): Likewise.
(frame_base_set_default): Likewise.
(frame_base_find_by_frame): Likewise.
* frame-unwind.c (frame_unwind_prepend_unwinder): Likewise.
(frame_unwind_append_unwinder): Likewise.
(frame_unwind_find_by_frame): Likewise.
* frame.c (frame_addr_hash): Likewise.
(frame_addr_hash_eq): Likewise.
(frame_stash_find): Likewise.
(do_frame_register_read): Likewise.
(unwind_to_current_frame): Likewise.
(frame_cleanup_after_sniffer): Likewise.
* frv-linux-tdep.c (frv_linux_sigtramp_frame_cache): Likewise.
* frv-tdep.c (frv_frame_unwind_cache): Likewise.
* ft32-tdep.c (ft32_frame_cache): Likewise.
* gcore.c (do_bfd_delete_cleanup): Likewise.
(gcore_create_callback): Likewise.
* gdb_bfd.c (hash_bfd): Likewise.
(eq_bfd): Likewise.
(gdb_bfd_open): Likewise.
(free_one_bfd_section): Likewise.
(gdb_bfd_ref): Likewise.
(gdb_bfd_unref): Likewise.
(get_section_descriptor): Likewise.
(gdb_bfd_map_section): Likewise.
(gdb_bfd_crc): Likewise.
(gdb_bfd_mark_parent): Likewise.
(gdb_bfd_record_inclusion): Likewise.
(gdb_bfd_requires_relocations): Likewise.
(print_one_bfd): Likewise.
* gdbtypes.c (type_pair_hash): Likewise.
(type_pair_eq): Likewise.
(builtin_type): Likewise.
(objfile_type): Likewise.
* gnu-v3-abi.c (vtable_ptrdiff_type): Likewise.
(vtable_address_point_offset): Likewise.
(gnuv3_get_vtable): Likewise.
(hash_value_and_voffset): Likewise.
(eq_value_and_voffset): Likewise.
(compare_value_and_voffset): Likewise.
(compute_vtable_size): Likewise.
(gnuv3_get_typeid_type): Likewise.
* go-lang.c (builtin_go_type): Likewise.
* guile/scm-block.c (bkscm_hash_block_smob): Likewise.
(bkscm_eq_block_smob): Likewise.
(bkscm_objfile_block_map): Likewise.
(bkscm_del_objfile_blocks): Likewise.
* guile/scm-breakpoint.c (bpscm_build_bp_list): Likewise.
* guile/scm-disasm.c (gdbscm_disasm_read_memory_worker): Likewise.
(gdbscm_disasm_print_address): Likewise.
* guile/scm-frame.c (frscm_hash_frame_smob): Likewise.
(frscm_eq_frame_smob): Likewise.
(frscm_inferior_frame_map): Likewise.
(frscm_del_inferior_frames): Likewise.
* guile/scm-gsmob.c (gdbscm_add_objfile_ref): Likewise.
* guile/scm-objfile.c (ofscm_handle_objfile_deleted): Likewise.
(ofscm_objfile_smob_from_objfile): Likewise.
* guile/scm-ports.c (ioscm_write): Likewise.
(ioscm_file_port_delete): Likewise.
(ioscm_file_port_rewind): Likewise.
(ioscm_file_port_put): Likewise.
(ioscm_file_port_write): Likewise.
* guile/scm-progspace.c (psscm_handle_pspace_deleted): Likewise.
(psscm_pspace_smob_from_pspace): Likewise.
* guile/scm-safe-call.c (scscm_recording_pre_unwind_handler): Likewise.
(scscm_recording_unwind_handler): Likewise.
(gdbscm_with_catch): Likewise.
(scscm_call_0_body): Likewise.
(scscm_call_1_body): Likewise.
(scscm_call_2_body): Likewise.
(scscm_call_3_body): Likewise.
(scscm_call_4_body): Likewise.
(scscm_apply_1_body): Likewise.
(scscm_eval_scheme_string): Likewise.
(gdbscm_safe_eval_string): Likewise.
(scscm_source_scheme_script): Likewise.
(gdbscm_safe_source_script): Likewise.
* guile/scm-string.c (gdbscm_call_scm_to_stringn): Likewise.
(gdbscm_call_scm_from_stringn): Likewise.
* guile/scm-symbol.c (syscm_hash_symbol_smob): Likewise.
(syscm_eq_symbol_smob): Likewise.
(syscm_get_symbol_map): Likewise.
(syscm_del_objfile_symbols): Likewise.
* guile/scm-symtab.c (stscm_hash_symtab_smob): Likewise.
(stscm_eq_symtab_smob): Likewise.
(stscm_objfile_symtab_map): Likewise.
(stscm_del_objfile_symtabs): Likewise.
* guile/scm-type.c (tyscm_hash_type_smob): Likewise.
(tyscm_eq_type_smob): Likewise.
(tyscm_type_map): Likewise.
(tyscm_copy_type_recursive): Likewise.
(save_objfile_types): Likewise.
* guile/scm-utils.c (extract_arg): Likewise.
* h8300-tdep.c (h8300_frame_cache): Likewise.
* hppa-linux-tdep.c (hppa_linux_sigtramp_frame_unwind_cache): Likewise.
* hppa-tdep.c (compare_unwind_entries): Likewise.
(find_unwind_entry): Likewise.
(hppa_frame_cache): Likewise.
(hppa_stub_frame_unwind_cache): Likewise.
* hppanbsd-tdep.c (hppanbsd_supply_gregset): Likewise.
* hppaobsd-tdep.c (hppaobsd_supply_gregset): Likewise.
(hppaobsd_supply_fpregset): Likewise.
* i386-cygwin-tdep.c (core_process_module_section): Likewise.
* i386-linux-tdep.c (i386_linux_init_abi): Likewise.
* i386-tdep.c (i386_frame_cache): Likewise.
(i386_epilogue_frame_cache): Likewise.
(i386_sigtramp_frame_cache): Likewise.
(i386_supply_gregset): Likewise.
(i386_collect_gregset): Likewise.
(i386_gdbarch_init): Likewise.
* i386obsd-tdep.c (i386obsd_aout_supply_regset): Likewise.
(i386obsd_trapframe_cache): Likewise.
* i387-tdep.c (i387_supply_fsave): Likewise.
(i387_collect_fsave): Likewise.
(i387_supply_fxsave): Likewise.
(i387_collect_fxsave): Likewise.
(i387_supply_xsave): Likewise.
(i387_collect_xsave): Likewise.
* ia64-tdep.c (ia64_frame_cache): Likewise.
(ia64_sigtramp_frame_cache): Likewise.
* infcmd.c (attach_command_continuation): Likewise.
(attach_command_continuation_free_args): Likewise.
* inferior.c (restore_inferior): Likewise.
(delete_thread_of_inferior): Likewise.
* inflow.c (inflow_inferior_data_cleanup): Likewise.
(get_inflow_inferior_data): Likewise.
(inflow_inferior_exit): Likewise.
* infrun.c (displaced_step_clear_cleanup): Likewise.
(restore_current_uiout_cleanup): Likewise.
(release_stop_context_cleanup): Likewise.
(do_restore_infcall_suspend_state_cleanup): Likewise.
(do_restore_infcall_control_state_cleanup): Likewise.
(restore_inferior_ptid): Likewise.
* inline-frame.c (block_starting_point_at): Likewise.
* iq2000-tdep.c (iq2000_frame_cache): Likewise.
* jit.c (get_jit_objfile_data): Likewise.
(get_jit_program_space_data): Likewise.
(jit_object_close_impl): Likewise.
(jit_find_objf_with_entry_addr): Likewise.
(jit_breakpoint_deleted): Likewise.
(jit_unwind_reg_set_impl): Likewise.
(jit_unwind_reg_get_impl): Likewise.
(jit_dealloc_cache): Likewise.
(jit_frame_sniffer): Likewise.
(jit_frame_prev_register): Likewise.
(jit_prepend_unwinder): Likewise.
(jit_inferior_exit_hook): Likewise.
(free_objfile_data): Likewise.
* jv-lang.c (jv_per_objfile_free): Likewise.
(get_dynamics_objfile): Likewise.
(get_java_class_symtab): Likewise.
(builtin_java_type): Likewise.
* language.c (language_string_char_type): Likewise.
(language_bool_type): Likewise.
(language_lookup_primitive_type): Likewise.
(language_lookup_primitive_type_as_symbol): Likewise.
* linespec.c (hash_address_entry): Likewise.
(eq_address_entry): Likewise.
(iterate_inline_only): Likewise.
(iterate_name_matcher): Likewise.
(decode_line_2_compare_items): Likewise.
(collect_one_symbol): Likewise.
(compare_symbols): Likewise.
(compare_msymbols): Likewise.
(add_symtabs_to_list): Likewise.
(collect_symbols): Likewise.
(compare_msyms): Likewise.
(add_minsym): Likewise.
(cleanup_linespec_result): Likewise.
* linux-fork.c (inferior_call_waitpid_cleanup): Likewise.
* linux-nat.c (delete_lwp_cleanup): Likewise.
(count_events_callback): Likewise.
(select_event_lwp_callback): Likewise.
(resume_stopped_resumed_lwps): Likewise.
* linux-tdep.c (get_linux_gdbarch_data): Likewise.
(invalidate_linux_cache_inf): Likewise.
(get_linux_inferior_data): Likewise.
(linux_find_memory_regions_thunk): Likewise.
(linux_make_mappings_callback): Likewise.
(linux_corefile_thread_callback): Likewise.
(find_mapping_size): Likewise.
* linux-thread-db.c (find_new_threads_callback): Likewise.
* lm32-tdep.c (lm32_frame_cache): Likewise.
* m2-lang.c (builtin_m2_type): Likewise.
* m32c-tdep.c (m32c_analyze_frame_prologue): Likewise.
* m32r-linux-tdep.c (m32r_linux_sigtramp_frame_cache): Likewise.
(m32r_linux_supply_gregset): Likewise.
(m32r_linux_collect_gregset): Likewise.
* m32r-tdep.c (m32r_frame_unwind_cache): Likewise.
* m68hc11-tdep.c (m68hc11_frame_unwind_cache): Likewise.
* m68k-tdep.c (m68k_frame_cache): Likewise.
* m68kbsd-tdep.c (m68kbsd_supply_fpregset): Likewise.
(m68kbsd_supply_gregset): Likewise.
* m68klinux-tdep.c (m68k_linux_sigtramp_frame_cache): Likewise.
* m88k-tdep.c (m88k_frame_cache): Likewise.
(m88k_supply_gregset): Likewise.
gdb/gdbserver/ChangeLog:
* dll.c (match_dll): Add cast(s).
(unloaded_dll): Likewise.
* linux-low.c (second_thread_of_pid_p): Likewise.
(delete_lwp_callback): Likewise.
(count_events_callback): Likewise.
(select_event_lwp_callback): Likewise.
(linux_set_resume_request): Likewise.
* server.c (accumulate_file_name_length): Likewise.
(emit_dll_description): Likewise.
(handle_qxfer_threads_worker): Likewise.
(visit_actioned_threads): Likewise.
* thread-db.c (any_thread_of): Likewise.
* tracepoint.c (same_process_p): Likewise.
(match_blocktype): Likewise.
(build_traceframe_info_xml): Likewise.
gdb/testsuite/ChangeLog:
* gdb.gdb/selftest.exp (do_steps_and_nexts): Adjust expected
source line.
2015-09-26 02:08:07 +08:00
|
|
|
  btrace = (struct btrace_data *) user_data;
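
  /* Only the "GenuineIntel" vendor string is recognized here; for any
     other vendor the cpu vendor field is simply not assigned.  */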
if (strcmp (vendor, "GenuineIntel") == 0)
|
|
|
|
btrace->variant.pt.config.cpu.vendor = CV_INTEL;
|
|
|
|
|
|
|
|
btrace->variant.pt.config.cpu.family = *family;
|
|
|
|
btrace->variant.pt.config.cpu.model = *model;
|
|
|
|
btrace->variant.pt.config.cpu.stepping = *stepping;
|
|
|
|
}
|
|
|
|
|
|
|
|
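
/* For illustration only: based on the attributes handled above and the
   "pt", "pt-config" and "cpu" elements added to features/btrace.dtd, a
   pt record is expected to look roughly like

     <pt>
       <pt-config>
         <cpu vendor="GenuineIntel" family="6" model="62" stepping="4"/>
       </pt-config>
       <raw>...</raw>
     </pt>

   The attribute values shown are hypothetical; the DTD is the
   authoritative grammar.  */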

/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
                         const struct gdb_xml_element *element,
                         void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
                 &btrace->variant.pt.size);
}
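
/* A note on the "raw" element, assuming the RSP hex encoding helpers
   from "rsp-low.h" used elsewhere for btrace data: the element body is
   a string of hex digit pairs that parse_xml_raw decodes into a binary
   buffer stored in btrace->variant.pt.data, with the decoded length in
   btrace->variant.pt.size.  */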
/* Parse a btrace "pt" xml record. */
|
|
|
|
|
|
|
|
static void
|
|
|
|
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
|
|
|
|
const struct gdb_xml_element *element,
|
2018-01-07 22:29:52 +08:00
|
|
|
void *user_data,
|
|
|
|
std::vector<gdb_xml_value> &attributes)
|
{
  struct btrace_data *btrace;
  btrace = (struct btrace_data *) user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}
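/* The defaults set in parse_xml_btrace_pt above are expected to be refined
   by the child element handlers registered in btrace_pt_children below:
   the "cpu" handler (parse_xml_btrace_pt_config_cpu) fills in the cpu
   configuration and the "raw" handler (parse_xml_btrace_pt_raw) attaches
   the encoded trace bytes.  */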
static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};
static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
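/* For illustration, a "pt" record accepted by the tables above looks
   roughly like this (the attribute values are made up):

     <pt>
       <pt-config>
         <cpu vendor="GenuineIntel" family="6" model="61" stepping="4"/>
       </pt-config>
       <raw>...hex-encoded trace bytes...</raw>
     </pt>  */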
static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
{ "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
|
|
|
|
NULL },
|
2013-03-11 16:28:58 +08:00
|
|
|
{ NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
|
|
|
|
};
|
|
|
|
|
|
|
|
static const struct gdb_xml_element btrace_elements[] = {
|
|
|
|
{ "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
|
|
|
|
check_xml_btrace_version, NULL },
|
|
|
|
{ NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
|
|
|
|
};
|
|
|
|
|
|
|
|
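/* For illustration, a complete branch trace document accepted by
   btrace_elements looks roughly like this for the BTS format (the
   addresses are made up):

     <btrace version="1.0">
       <block begin="0x400400" end="0x400416"/>
       <block begin="0x400430" end="0x40045a"/>
     </btrace>

   A Processor Trace document carries a single <pt> element instead of
   the <block> elements.  */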
#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */
void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
#if defined (HAVE_LIBEXPAT)

  int errcode;
  btrace_data result;
  result.format = BTRACE_FORMAT_NONE;

  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
                                 buffer, &result);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  *btrace = std::move (result);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML support was disabled at "
           "compile time."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
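/* A minimal caller sketch (not taken from this file; actual callers obtain
   the XML from the target, e.g. via the remote protocol):

     struct btrace_data data;

     parse_xml_btrace (&data, xml_buffer);
     if (data.format == BTRACE_FORMAT_BTS)
       ... iterate the blocks in data.variant.bts ...

   On parse errors, parse_xml_btrace throws via error (), so there is no
   return value to check.  */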
#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
                           const struct gdb_xml_element *element,
                           void *user_data,
                           std::vector<gdb_xml_value> &attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;
(builtin_java_type): Likewise.
* language.c (language_string_char_type): Likewise.
(language_bool_type): Likewise.
(language_lookup_primitive_type): Likewise.
(language_lookup_primitive_type_as_symbol): Likewise.
* linespec.c (hash_address_entry): Likewise.
(eq_address_entry): Likewise.
(iterate_inline_only): Likewise.
(iterate_name_matcher): Likewise.
(decode_line_2_compare_items): Likewise.
(collect_one_symbol): Likewise.
(compare_symbols): Likewise.
(compare_msymbols): Likewise.
(add_symtabs_to_list): Likewise.
(collect_symbols): Likewise.
(compare_msyms): Likewise.
(add_minsym): Likewise.
(cleanup_linespec_result): Likewise.
* linux-fork.c (inferior_call_waitpid_cleanup): Likewise.
* linux-nat.c (delete_lwp_cleanup): Likewise.
(count_events_callback): Likewise.
(select_event_lwp_callback): Likewise.
(resume_stopped_resumed_lwps): Likewise.
* linux-tdep.c (get_linux_gdbarch_data): Likewise.
(invalidate_linux_cache_inf): Likewise.
(get_linux_inferior_data): Likewise.
(linux_find_memory_regions_thunk): Likewise.
(linux_make_mappings_callback): Likewise.
(linux_corefile_thread_callback): Likewise.
(find_mapping_size): Likewise.
* linux-thread-db.c (find_new_threads_callback): Likewise.
* lm32-tdep.c (lm32_frame_cache): Likewise.
* m2-lang.c (builtin_m2_type): Likewise.
* m32c-tdep.c (m32c_analyze_frame_prologue): Likewise.
* m32r-linux-tdep.c (m32r_linux_sigtramp_frame_cache): Likewise.
(m32r_linux_supply_gregset): Likewise.
(m32r_linux_collect_gregset): Likewise.
* m32r-tdep.c (m32r_frame_unwind_cache): Likewise.
* m68hc11-tdep.c (m68hc11_frame_unwind_cache): Likewise.
* m68k-tdep.c (m68k_frame_cache): Likewise.
* m68kbsd-tdep.c (m68kbsd_supply_fpregset): Likewise.
(m68kbsd_supply_gregset): Likewise.
* m68klinux-tdep.c (m68k_linux_sigtramp_frame_cache): Likewise.
* m88k-tdep.c (m88k_frame_cache): Likewise.
(m88k_supply_gregset): Likewise.
gdb/gdbserver/ChangeLog:
* dll.c (match_dll): Add cast(s).
(unloaded_dll): Likewise.
* linux-low.c (second_thread_of_pid_p): Likewise.
(delete_lwp_callback): Likewise.
(count_events_callback): Likewise.
(select_event_lwp_callback): Likewise.
(linux_set_resume_request): Likewise.
* server.c (accumulate_file_name_length): Likewise.
(emit_dll_description): Likewise.
(handle_qxfer_threads_worker): Likewise.
(visit_actioned_threads): Likewise.
* thread-db.c (any_thread_of): Likewise.
* tracepoint.c (same_process_p): Likewise.
(match_blocktype): Likewise.
(build_traceframe_info_xml): Likewise.
gdb/testsuite/ChangeLog:
* gdb.gdb/selftest.exp (do_steps_and_nexts): Adjust expected
source line.
2015-09-26 02:08:07 +08:00
  conf = (struct btrace_config *) user_data;

2013-11-28 22:44:13 +08:00
  conf->format = BTRACE_FORMAT_BTS;

2013-11-28 23:39:12 +08:00
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)

2018-01-07 22:29:52 +08:00
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value.get ();

2013-11-28 22:44:13 +08:00
}
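For reference, the kind of configuration fragment the "bts" handler above consumes looks roughly like the following (element and attribute names follow features/btrace-conf.dtd; the version and size values are illustrative):

  <btrace-conf version="1.0">
    <bts size="65536"/>
  </btrace-conf>

The optional "size" attribute is parsed as a ULONGEST by gdb_xml_parse_attr_ulongest and stored in conf->bts.size, as shown above.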
btrace: support Intel(R) Processor Trace
Adds a new command "record btrace pt" to configure the kernel to use
Intel(R) Processor Trace instead of Branch Trace Store.
The "record btrace" command chooses the tracing format automatically.
Intel(R) Processor Trace support requires Linux 4.1 and libipt.
gdb/
* NEWS: Announce new commands "record btrace pt" and "record pt".
Announce new options "set|show record btrace pt buffer-size".
* btrace.c: Include "rsp-low.h".
Include "inttypes.h".
(btrace_add_pc): Add forward declaration.
(pt_reclassify_insn, ftrace_add_pt, btrace_pt_readmem_callback)
(pt_translate_cpu_vendor, btrace_finalize_ftrace_pt)
(btrace_compute_ftrace_pt): New.
(btrace_compute_ftrace): Support BTRACE_FORMAT_PT.
(check_xml_btrace_version): Update version check.
(parse_xml_raw, parse_xml_btrace_pt_config_cpu)
(parse_xml_btrace_pt_raw, parse_xml_btrace_pt)
(btrace_pt_config_cpu_attributes, btrace_pt_config_children)
(btrace_pt_children): New.
(btrace_children): Add support for "pt".
(parse_xml_btrace_conf_pt, btrace_conf_pt_attributes): New.
(btrace_conf_children): Add support for "pt".
* btrace.h: Include "intel-pt.h".
(btrace_pt_error): New.
* common/btrace-common.c (btrace_format_string, btrace_data_fini)
(btrace_data_empty): Support BTRACE_FORMAT_PT.
* common/btrace-common.h (btrace_format): Add BTRACE_FORMAT_PT.
(struct btrace_config_pt): New.
(struct btrace_config)<pt>: New.
(struct btrace_data_pt_config, struct btrace_data_pt): New.
(struct btrace_data)<pt>: New.
* features/btrace-conf.dtd (btrace-conf)<pt>: New.
(pt): New.
* features/btrace.dtd (btrace)<pt>: New.
(pt, pt-config, cpu): New.
* nat/linux-btrace.c (perf_event_read, perf_event_read_all)
(perf_event_pt_event_type, kernel_supports_pt)
(linux_supports_pt): New.
(linux_supports_btrace): Support BTRACE_FORMAT_PT.
(linux_enable_bts): Free tinfo on error.
(linux_enable_pt): New.
(linux_enable_btrace): Support BTRACE_FORMAT_PT.
(linux_disable_pt): New.
(linux_disable_btrace): Support BTRACE_FORMAT_PT.
(linux_fill_btrace_pt_config, linux_read_pt): New.
(linux_read_btrace): Support BTRACE_FORMAT_PT.
* nat/linux-btrace.h (struct btrace_tinfo_pt): New.
(struct btrace_target_info)<pt>: New.
* record-btrace.c (set_record_btrace_pt_cmdlist)
(show_record_btrace_pt_cmdlist): New.
(record_btrace_print_pt_conf): New.
(record_btrace_print_conf): Support BTRACE_FORMAT_PT.
(btrace_ui_out_decode_error): Support BTRACE_FORMAT_PT.
(cmd_record_btrace_pt_start): New.
(cmd_record_btrace_start): Support BTRACE_FORMAT_PT.
(cmd_set_record_btrace_pt, cmd_show_record_btrace_pt): New.
(_initialize_record_btrace): Add new commands.
* remote.c (PACKET_Qbtrace_pt, PACKET_Qbtrace_conf_pt_size): New.
(remote_protocol_features): Add "Qbtrace:pt".
Add "Qbtrace-conf:pt:size".
(remote_supports_btrace): Support BTRACE_FORMAT_PT.
(btrace_sync_conf): Support PACKET_Qbtrace_conf_pt_size.
(remote_enable_btrace): Support BTRACE_FORMAT_PT.
(_initialize_remote): Add new commands.
gdbserver/
* linux-low.c: Include "rsp-low.h"
(linux_low_encode_pt_config, linux_low_encode_raw): New.
(linux_low_read_btrace): Support BTRACE_FORMAT_PT.
(linux_low_btrace_conf): Support BTRACE_FORMAT_PT.
(handle_btrace_enable_pt): New.
(handle_btrace_general_set): Support "pt".
(handle_btrace_conf_general_set): Support "pt:size".
doc/
* gdb.texinfo (Process Record and Replay): Spell out that variables
and registers are not available during btrace replay.
Describe the new "record btrace pt" command.
Describe the new "set|show record btrace pt buffer-size" options.
(General Query Packets): Describe the new Qbtrace:pt and
Qbtrace-conf:pt:size packets.
Expand "bts" to "Branch Trace Store".
Update the branch trace DTD.
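A rough user-level illustration of the commands announced above (the buffer size value is made up; see the NEWS entry and the manual for the exact syntax):

  (gdb) set record btrace pt buffer-size 16384
  (gdb) record btrace pt
  (gdb) info record

The buffer size set with the new option would take effect when recording is started, since it configures the kernel's trace buffer.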
2014-01-24 20:45:47 +08:00
/* Parse a btrace-conf "pt" xml record. */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,

2018-01-07 22:29:52 +08:00
                          void *user_data,
                          std::vector<gdb_xml_value> &attributes)
2014-01-24 20:45:47 +08:00
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;
2015-09-26 02:08:07 +08:00
  conf = (struct btrace_config *) user_data;
2014-01-24 20:45:47 +08:00
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)

2018-01-07 22:29:52 +08:00
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value.get ();
2014-01-24 20:45:47 +08:00
}

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

2013-11-28 23:39:12 +08:00
static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

2013-11-28 22:44:13 +08:00
static const struct gdb_xml_element btrace_conf_children[] = {
2013-11-28 23:39:12 +08:00
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
2014-01-24 20:45:47 +08:00
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },

2013-11-28 22:44:13 +08:00
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */
/* See btrace.h. */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
#if defined (HAVE_LIBEXPAT)

2018-07-09 02:39:36 +08:00
  int errcode;

2013-11-28 22:44:13 +08:00
  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else /* !defined (HAVE_LIBEXPAT) */

2018-02-08 21:35:44 +08:00
  error (_("Cannot process the branch trace configuration. XML support "
           "was disabled at compile time."));

2013-11-28 22:44:13 +08:00
#endif /* !defined (HAVE_LIBEXPAT) */
}
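A minimal usage sketch for this parser (hypothetical caller; the element and attribute names follow features/btrace-conf.dtd, while the sizes and the version string are made up):

  #include "btrace.h"

  static void
  example_parse_btrace_conf (void)
  {
    struct btrace_config conf {};

    /* An illustrative configuration document as a target might report it.  */
    const char *xml = "<btrace-conf version=\"1.0\">"
                      "<bts size=\"65536\"/>"
                      "<pt size=\"16384\"/>"
                      "</btrace-conf>";

    parse_xml_btrace_conf (&conf, xml);

    /* conf.bts.size and conf.pt.size now hold the parsed sizes; on
       malformed input parse_xml_btrace_conf throws an error instead.  */
  }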
btrace: change branch trace data structure
The branch trace is represented as 3 vectors:
- a block vector
- an instruction vector
- a function vector
Each vector (except for the first) is computed from the one above.
Change this into a graph where a node represents a sequence of instructions
belonging to the same function and where we have three types of edges to connect
the function segments:
- control flow
- same function (instance)
- call stack
This allows us to navigate in the branch trace. We will need this for "record
goto" and reverse execution.
This patch introduces the data structure and computes the control flow edges.
It also introduces iterator structs to simplify iterating over the branch trace
in control-flow order.
It also fixes PR gdb/15240 since now recursive calls are handled correctly.
Fix the test that got the number of expected fib instances and also the
function numbers wrong.
The current instruction had been part of the branch trace. This will look odd
once we start supporting reverse execution. Remove it. We still keep it in
the trace itself to allow extending the branch trace more easily in the future.
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (struct btrace_func_link): New.
(enum btrace_function_flag): New.
(struct btrace_inst): Rename to ...
(struct btrace_insn): ...this. Update all users.
(struct btrace_func) <ibegin, iend>: Remove.
(struct btrace_func_link): New.
(struct btrace_func): Rename to ...
(struct btrace_function): ...this. Update all users.
(struct btrace_function) <segment, flow, up, insn, insn_offset)
(number, level, flags>: New.
(struct btrace_insn_iterator): Rename to ...
(struct btrace_insn_history): ...this.
Update all users.
(struct btrace_insn_iterator, btrace_call_iterator): New.
(struct btrace_target_info) <btrace, itrace, ftrace>: Remove.
(struct btrace_target_info) <begin, end, level>
<insn_history, call_history>: New.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* btrace.c (btrace_init_insn_iterator)
(btrace_init_func_iterator, compute_itrace): Remove.
(ftrace_print_function_name, ftrace_print_filename)
(ftrace_skip_file): Change
parameter to const.
(ftrace_init_func): Remove.
(ftrace_debug): Use new btrace_function fields.
(ftrace_function_switched): Also consider gaining and
losing symbol information.
(ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return)
(ftrace_new_switch, ftrace_find_caller, ftrace_new_function)
(ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
New.
(ftrace_new_function): Move. Remove debug print.
(ftrace_update_lines, ftrace_update_insns): New.
(ftrace_update_function): Check for call, ret, and jump.
(compute_ftrace): Renamed to ...
(btrace_compute_ftrace): ...this. Rewritten to compute call
stack.
(btrace_fetch, btrace_clear): Updated.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* record-btrace.c (require_btrace): Use new btrace thread
info fields.
(record_btrace_info, btrace_insn_history)
(record_btrace_insn_history, record_btrace_insn_history_range):
Use new btrace thread info fields and new iterator.
(btrace_func_history_src_line): Rename to ...
(btrace_call_history_src_line): ...this. Use new btrace
thread info fields.
(btrace_func_history): Rename to ...
(btrace_call_history): ...this. Use new btrace thread info
fields and new iterator.
(record_btrace_call_history, record_btrace_call_history_range):
Use new btrace thread info fields and new iterator.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected function
trace.
* gdb.btrace/instruction_history.exp: Initialize traced.
Remove traced_functions.
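The iterator API introduced above can be used roughly as follows (illustrative only; btinfo stands for the thread's btrace thread info, and the argument lists are abbreviated from the declarations in btrace.h):

  struct btrace_insn_iterator it, end;

  btrace_insn_begin (&it, btinfo);   /* First recorded instruction.  */
  btrace_insn_end (&end, btinfo);    /* One past the last instruction.  */

  while (btrace_insn_cmp (&it, &end) != 0)
    {
      const struct btrace_insn *insn = btrace_insn_get (&it);

      /* ... inspect insn, e.g. its address ...  */

      btrace_insn_next (&it, 1);
    }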
2013-03-22 21:32:47 +08:00
/* See btrace.h. */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

2017-05-30 18:47:37 +08:00
  index = it->insn_index;
  bfun = &it->btinfo->functions[it->call_index];
2013-03-22 21:32:47 +08:00
|
|
|
|
record-btrace: indicate gaps
Indicate gaps in the trace due to decode errors. Internally, a gap is
represented as a btrace function segment without instructions and with a
non-zero format-specific error code.
Show the gap when traversing the instruction or function call history.
Also indicate gaps in "info record".
It looks like this:
(gdb) info record
Active record target: record-btrace
Recording format: Branch Trace Store.
Buffer size: 64KB.
Recorded 32 instructions in 5 functions (1 gaps) for thread 1 (process 7182).
(gdb) record function-call-history /cli
1 fib inst 1,9 at src/fib.c:9,14
2 fib inst 10,20 at src/fib.c:6,14
3 [decode error (1): instruction overflow]
4 fib inst 21,28 at src/fib.c:11,14
5 fib inst 29,33 at src/fib.c:6,9
(gdb) record instruction-history 20,22
20 0x000000000040062f <fib+47>: sub $0x1,%rax
[decode error (1): instruction overflow]
21 0x0000000000400613 <fib+19>: add $0x1,%rax
22 0x0000000000400617 <fib+23>: mov %rax,0x200a3a(%rip)
(gdb)
Gaps are ignored during reverse execution and replay.
2015-02-09 Markus Metzger <markus.t.metzger@intel.com>
* btrace.c (ftrace_find_call): Skip gaps.
(ftrace_new_function): Initialize level.
(ftrace_new_call, ftrace_new_tailcall, ftrace_new_return)
(ftrace_new_switch): Update
level computation.
(ftrace_new_gap): New.
(ftrace_update_function): Create new function after gap.
(btrace_compute_ftrace_bts): Create gap on error.
(btrace_stitch_bts): Update parameters. Clear trace if it
becomes empty.
(btrace_stitch_trace): Update parameters. Update callers.
(btrace_clear): Reset the number of gaps.
(btrace_insn_get): Return NULL if the iterator points to a gap.
(btrace_insn_number): Return zero if the iterator points to a gap.
(btrace_insn_end): Allow gaps at the end.
(btrace_insn_next, btrace_insn_prev, btrace_insn_cmp): Handle gaps.
(btrace_find_insn_by_number): Assert that the found iterator does
not point to a gap.
(btrace_call_next, btrace_call_prev): Assert that the last function
is not a gap.
* btrace.h (btrace_bts_error): New.
(btrace_function): Update comment.
(btrace_function) <insn, insn_offset, number>: Update comment.
(btrace_function) <errcode>: New.
(btrace_thread_info) <ngaps>: New.
(btrace_thread_info) <replay>: Update comment.
(btrace_insn_get): Update comment.
* record-btrace.c (btrace_ui_out_decode_error): New.
(record_btrace_info): Print number of gaps.
(btrace_insn_history, btrace_call_history): Call
btrace_ui_out_decode_error for gaps.
(record_btrace_step_thread, record_btrace_start_replaying): Skip gaps.
testsuite/
* gdb.btrace/buffer-size.exp: Update "info record" output.
* gdb.btrace/delta.exp: Update "info record" output.
* gdb.btrace/enable.exp: Update "info record" output.
* gdb.btrace/finish.exp: Update "info record" output.
* gdb.btrace/instruction_history.exp: Update "info record" output.
* gdb.btrace/next.exp: Update "info record" output.
* gdb.btrace/nexti.exp: Update "info record" output.
* gdb.btrace/step.exp: Update "info record" output.
* gdb.btrace/stepi.exp: Update "info record" output.
* gdb.btrace/nohist.exp: Update "info record" output.
2014-01-30 16:51:10 +08:00
|
|
|
/* Check if the iterator points to a gap in the trace. */
|
|
|
|
if (bfun->errcode != 0)
|
|
|
|
return NULL;
|
|
|
|
|
btrace: change branch trace data structure
The branch trace is represented as 3 vectors:
- a block vector
- a instruction vector
- a function vector
Each vector (except for the first) is computed from the one above.
Change this into a graph where a node represents a sequence of instructions
belonging to the same function and where we have three types of edges to connect
the function segments:
- control flow
- same function (instance)
- call stack
This allows us to navigate in the branch trace. We will need this for "record
goto" and reverse execution.
This patch introduces the data structure and computes the control flow edges.
It also introduces iterator structs to simplify iterating over the branch trace
in control-flow order.
It also fixes PR gdb/15240 since now recursive calls are handled correctly.
Fix the test that got the number of expected fib instances and also the
function numbers wrong.
The current instruction had been part of the branch trace. This will look odd
once we start support for reverse execution. Remove it. We still keep it in
the trace itself to allow extending the branch trace more easily in the future.
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (struct btrace_func_link): New.
(enum btrace_function_flag): New.
(struct btrace_inst): Rename to ...
(struct btrace_insn): ...this. Update all users.
(struct btrace_func) <ibegin, iend>: Remove.
(struct btrace_func_link): New.
(struct btrace_func): Rename to ...
(struct btrace_function): ...this. Update all users.
(struct btrace_function) <segment, flow, up, insn, insn_offset)
(number, level, flags>: New.
(struct btrace_insn_iterator): Rename to ...
(struct btrace_insn_history): ...this.
Update all users.
(struct btrace_insn_iterator, btrace_call_iterator): New.
(struct btrace_target_info) <btrace, itrace, ftrace>: Remove.
(struct btrace_target_info) <begin, end, level>
<insn_history, call_history>: New.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* btrace.c (btrace_init_insn_iterator)
(btrace_init_func_iterator, compute_itrace): Remove.
(ftrace_print_function_name, ftrace_print_filename)
(ftrace_skip_file): Change
parameter to const.
(ftrace_init_func): Remove.
(ftrace_debug): Use new btrace_function fields.
(ftrace_function_switched): Also consider gaining and
losing symbol information).
(ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return)
(ftrace_new_switch, ftrace_find_caller, ftrace_new_function)
(ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
New.
(ftrace_new_function): Move. Remove debug print.
(ftrace_update_lines, ftrace_update_insns): New.
(ftrace_update_function): Check for call, ret, and jump.
(compute_ftrace): Renamed to ...
(btrace_compute_ftrace): ...this. Rewritten to compute call
stack.
(btrace_fetch, btrace_clear): Updated.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* record-btrace.c (require_btrace): Use new btrace thread
info fields.
(record_btrace_info, btrace_insn_history)
(record_btrace_insn_history, record_btrace_insn_history_range):
Use new btrace thread info fields and new iterator.
(btrace_func_history_src_line): Rename to ...
(btrace_call_history_src_line): ...this. Use new btrace
thread info fields.
(btrace_func_history): Rename to ...
(btrace_call_history): ...this. Use new btrace thread info
fields and new iterator.
(record_btrace_call_history, record_btrace_call_history_range):
Use new btrace thread info fields and new iterator.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected function
trace.
* gdb.btrace/instruction_history.exp: Initialize traced.
Remove traced_functions.
2013-03-22 21:32:47 +08:00
|
|
|
/* The index is within the bounds of this function's instruction vector. */
|
btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
2017-09-04 16:46:36 +08:00
|
|
|
end = bfun->insn.size ();
|
btrace: change branch trace data structure
The branch trace is represented as 3 vectors:
- a block vector
- a instruction vector
- a function vector
Each vector (except for the first) is computed from the one above.
Change this into a graph where a node represents a sequence of instructions
belonging to the same function and where we have three types of edges to connect
the function segments:
- control flow
- same function (instance)
- call stack
This allows us to navigate in the branch trace. We will need this for "record
goto" and reverse execution.
This patch introduces the data structure and computes the control flow edges.
It also introduces iterator structs to simplify iterating over the branch trace
in control-flow order.
It also fixes PR gdb/15240 since now recursive calls are handled correctly.
Fix the test that got the number of expected fib instances and also the
function numbers wrong.
The current instruction had been part of the branch trace. This will look odd
once we start support for reverse execution. Remove it. We still keep it in
the trace itself to allow extending the branch trace more easily in the future.
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (struct btrace_func_link): New.
(enum btrace_function_flag): New.
(struct btrace_inst): Rename to ...
(struct btrace_insn): ...this. Update all users.
(struct btrace_func) <ibegin, iend>: Remove.
(struct btrace_func_link): New.
(struct btrace_func): Rename to ...
(struct btrace_function): ...this. Update all users.
(struct btrace_function) <segment, flow, up, insn, insn_offset)
(number, level, flags>: New.
(struct btrace_insn_iterator): Rename to ...
(struct btrace_insn_history): ...this.
Update all users.
(struct btrace_insn_iterator, btrace_call_iterator): New.
(struct btrace_target_info) <btrace, itrace, ftrace>: Remove.
(struct btrace_target_info) <begin, end, level>
<insn_history, call_history>: New.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* btrace.c (btrace_init_insn_iterator)
(btrace_init_func_iterator, compute_itrace): Remove.
(ftrace_print_function_name, ftrace_print_filename)
(ftrace_skip_file): Change
parameter to const.
(ftrace_init_func): Remove.
(ftrace_debug): Use new btrace_function fields.
(ftrace_function_switched): Also consider gaining and
losing symbol information).
(ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return)
(ftrace_new_switch, ftrace_find_caller, ftrace_new_function)
(ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
New.
(ftrace_new_function): Move. Remove debug print.
(ftrace_update_lines, ftrace_update_insns): New.
(ftrace_update_function): Check for call, ret, and jump.
(compute_ftrace): Renamed to ...
(btrace_compute_ftrace): ...this. Rewritten to compute call
stack.
(btrace_fetch, btrace_clear): Updated.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* record-btrace.c (require_btrace): Use new btrace thread
info fields.
(record_btrace_info, btrace_insn_history)
(record_btrace_insn_history, record_btrace_insn_history_range):
Use new btrace thread info fields and new iterator.
(btrace_func_history_src_line): Rename to ...
(btrace_call_history_src_line): ...this. Use new btrace
thread info fields.
(btrace_func_history): Rename to ...
(btrace_call_history): ...this. Use new btrace thread info
fields and new iterator.
(record_btrace_call_history, record_btrace_call_history_range):
Use new btrace thread info fields and new iterator.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected function
trace.
* gdb.btrace/instruction_history.exp: Initialize traced.
Remove traced_functions.
2013-03-22 21:32:47 +08:00
|
|
|
gdb_assert (0 < end);
|
|
|
|
gdb_assert (index < end);
|
|
|
|
|
btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
2017-09-04 16:46:36 +08:00
|
|
|
return &bfun->insn[index];
|
btrace: change branch trace data structure
The branch trace is represented as 3 vectors:
- a block vector
- a instruction vector
- a function vector
Each vector (except for the first) is computed from the one above.
Change this into a graph where a node represents a sequence of instructions
belonging to the same function and where we have three types of edges to connect
the function segments:
- control flow
- same function (instance)
- call stack
This allows us to navigate in the branch trace. We will need this for "record
goto" and reverse execution.
This patch introduces the data structure and computes the control flow edges.
It also introduces iterator structs to simplify iterating over the branch trace
in control-flow order.
It also fixes PR gdb/15240 since now recursive calls are handled correctly.
Fix the test that got the number of expected fib instances and also the
function numbers wrong.
The current instruction had been part of the branch trace. This will look odd
once we start support for reverse execution. Remove it. We still keep it in
the trace itself to allow extending the branch trace more easily in the future.
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (struct btrace_func_link): New.
(enum btrace_function_flag): New.
(struct btrace_inst): Rename to ...
(struct btrace_insn): ...this. Update all users.
(struct btrace_func) <ibegin, iend>: Remove.
(struct btrace_func_link): New.
(struct btrace_func): Rename to ...
(struct btrace_function): ...this. Update all users.
(struct btrace_function) <segment, flow, up, insn, insn_offset)
(number, level, flags>: New.
(struct btrace_insn_iterator): Rename to ...
(struct btrace_insn_history): ...this.
Update all users.
(struct btrace_insn_iterator, btrace_call_iterator): New.
(struct btrace_target_info) <btrace, itrace, ftrace>: Remove.
(struct btrace_target_info) <begin, end, level>
<insn_history, call_history>: New.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* btrace.c (btrace_init_insn_iterator)
(btrace_init_func_iterator, compute_itrace): Remove.
(ftrace_print_function_name, ftrace_print_filename)
(ftrace_skip_file): Change
parameter to const.
(ftrace_init_func): Remove.
(ftrace_debug): Use new btrace_function fields.
(ftrace_function_switched): Also consider gaining and
losing symbol information).
(ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return)
(ftrace_new_switch, ftrace_find_caller, ftrace_new_function)
(ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
New.
(ftrace_new_function): Move. Remove debug print.
(ftrace_update_lines, ftrace_update_insns): New.
(ftrace_update_function): Check for call, ret, and jump.
(compute_ftrace): Renamed to ...
(btrace_compute_ftrace): ...this. Rewritten to compute call
stack.
(btrace_fetch, btrace_clear): Updated.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* record-btrace.c (require_btrace): Use new btrace thread
info fields.
(record_btrace_info, btrace_insn_history)
(record_btrace_insn_history, record_btrace_insn_history_range):
Use new btrace thread info fields and new iterator.
(btrace_func_history_src_line): Rename to ...
(btrace_call_history_src_line): ...this. Use new btrace
thread info fields.
(btrace_func_history): Rename to ...
(btrace_call_history): ...this. Use new btrace thread info
fields and new iterator.
(record_btrace_call_history, record_btrace_call_history_range):
Use new btrace thread info fields and new iterator.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected function
trace.
* gdb.btrace/instruction_history.exp: Initialize traced.
Remove traced_functions.
2013-03-22 21:32:47 +08:00
|
|
|
}
/* See btrace.h.  */

int
btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].errcode;
}
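
/* Illustrative sketch, not part of the original file: one way a caller
   might read the instruction under an iterator while handling gaps.  A gap
   is a function segment without instructions and with a non-zero
   decode-error code; btrace_insn_get returns NULL for it, and the code can
   be read with btrace_insn_get_error.  The helper name and the printing
   calls are assumptions made for illustration only.  */

static void
btrace_example_print_insn (const struct btrace_insn_iterator *it)
{
  const struct btrace_insn *insn;

  insn = btrace_insn_get (it);
  if (insn == NULL)
    {
      /* The iterator points to a gap; report the error code instead.  */
      printf_unfiltered ("[decode error (%d)]\n",
                         btrace_insn_get_error (it));
      return;
    }

  /* A regular instruction: print its global number and its address.  */
  printf_unfiltered ("%u\t%s\n", btrace_insn_number (it),
                     core_addr_to_string_nz (insn->pc));
}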
/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
}
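
/* Illustrative sketch, not part of the original file: instruction numbers
   are global across function segments, being the containing segment's
   insn_offset plus the index within that segment.  For example, an iterator
   at index 3 of a segment whose insn_offset is 10 yields number 13.  The
   helper name below is an assumption made for illustration only.  */

static unsigned int
btrace_example_trace_end_number (const struct btrace_thread_info *btinfo)
{
  struct btrace_insn_iterator end;

  /* Position the iterator one past the last recorded instruction ...  */
  btrace_insn_end (&end, btinfo);

  /* ... and convert that position into a global instruction number.  */
  return btrace_insn_number (&end);
}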
/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->call_index = 0;
  it->insn_index = 0;
}
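
/* Illustrative sketch, not part of the original file: walking the whole
   recorded trace in control-flow order with the iterator functions defined
   in this file.  btrace_insn_next and btrace_insn_cmp are declared in
   btrace.h; the helper name and the per-instruction callback are
   assumptions made for illustration only.  */

static void
btrace_example_walk_trace (const struct btrace_thread_info *btinfo,
                           void (*visit) (const struct btrace_insn *))
{
  struct btrace_insn_iterator it, end;

  btrace_insn_begin (&it, btinfo);
  btrace_insn_end (&end, btinfo);

  while (btrace_insn_cmp (&it, &end) < 0)
    {
      const struct btrace_insn *insn = btrace_insn_get (&it);

      /* Skip gaps; btrace_insn_get returns NULL for them.  */
      if (insn != NULL)
        visit (insn);

      /* Advance by one instruction; stop if we cannot make progress.  */
      if (btrace_insn_next (&it, 1) == 0)
        break;
    }
}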
/* See btrace.h. */
|
|
|
|
|
|
|
|
void
|
|
|
|
btrace_insn_end (struct btrace_insn_iterator *it,
|
|
|
|
const struct btrace_thread_info *btinfo)
|
|
|
|
{
|
|
|
|
const struct btrace_function *bfun;
|
|
|
|
unsigned int length;
|
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
if (btinfo->functions.empty ())
|
btrace: change branch trace data structure
The branch trace is represented as 3 vectors:
- a block vector
- a instruction vector
- a function vector
Each vector (except for the first) is computed from the one above.
Change this into a graph where a node represents a sequence of instructions
belonging to the same function and where we have three types of edges to connect
the function segments:
- control flow
- same function (instance)
- call stack
This allows us to navigate in the branch trace. We will need this for "record
goto" and reverse execution.
This patch introduces the data structure and computes the control flow edges.
It also introduces iterator structs to simplify iterating over the branch trace
in control-flow order.
It also fixes PR gdb/15240 since now recursive calls are handled correctly.
Fix the test that got the number of expected fib instances and also the
function numbers wrong.
The current instruction had been part of the branch trace. This will look odd
once we start support for reverse execution. Remove it. We still keep it in
the trace itself to allow extending the branch trace more easily in the future.
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (struct btrace_func_link): New.
(enum btrace_function_flag): New.
(struct btrace_inst): Rename to ...
(struct btrace_insn): ...this. Update all users.
(struct btrace_func) <ibegin, iend>: Remove.
(struct btrace_func_link): New.
(struct btrace_func): Rename to ...
(struct btrace_function): ...this. Update all users.
(struct btrace_function) <segment, flow, up, insn, insn_offset)
(number, level, flags>: New.
(struct btrace_insn_iterator): Rename to ...
(struct btrace_insn_history): ...this.
Update all users.
(struct btrace_insn_iterator, btrace_call_iterator): New.
(struct btrace_target_info) <btrace, itrace, ftrace>: Remove.
(struct btrace_target_info) <begin, end, level>
<insn_history, call_history>: New.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* btrace.c (btrace_init_insn_iterator)
(btrace_init_func_iterator, compute_itrace): Remove.
(ftrace_print_function_name, ftrace_print_filename)
(ftrace_skip_file): Change
parameter to const.
(ftrace_init_func): Remove.
(ftrace_debug): Use new btrace_function fields.
(ftrace_function_switched): Also consider gaining and
losing symbol information).
(ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return)
(ftrace_new_switch, ftrace_find_caller, ftrace_new_function)
(ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
New.
(ftrace_new_function): Move. Remove debug print.
(ftrace_update_lines, ftrace_update_insns): New.
(ftrace_update_function): Check for call, ret, and jump.
(compute_ftrace): Renamed to ...
(btrace_compute_ftrace): ...this. Rewritten to compute call
stack.
(btrace_fetch, btrace_clear): Updated.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* record-btrace.c (require_btrace): Use new btrace thread
info fields.
(record_btrace_info, btrace_insn_history)
(record_btrace_insn_history, record_btrace_insn_history_range):
Use new btrace thread info fields and new iterator.
(btrace_func_history_src_line): Rename to ...
(btrace_call_history_src_line): ...this. Use new btrace
thread info fields.
(btrace_func_history): Rename to ...
(btrace_call_history): ...this. Use new btrace thread info
fields and new iterator.
(record_btrace_call_history, record_btrace_call_history_range):
Use new btrace thread info fields and new iterator.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected function
trace.
* gdb.btrace/instruction_history.exp: Initialize traced.
Remove traced_functions.
2013-03-22 21:32:47 +08:00
|
|
|
error (_("No trace."));
|
|
|
|
|
2017-05-30 18:47:37 +08:00
|
|
|
bfun = &btinfo->functions.back ();
|
btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
2017-09-04 16:46:36 +08:00
|
|
|
length = bfun->insn.size ();
|
btrace: change branch trace data structure
The branch trace is represented as 3 vectors:
- a block vector
- a instruction vector
- a function vector
Each vector (except for the first) is computed from the one above.
Change this into a graph where a node represents a sequence of instructions
belonging to the same function and where we have three types of edges to connect
the function segments:
- control flow
- same function (instance)
- call stack
This allows us to navigate in the branch trace. We will need this for "record
goto" and reverse execution.
This patch introduces the data structure and computes the control flow edges.
It also introduces iterator structs to simplify iterating over the branch trace
in control-flow order.
It also fixes PR gdb/15240 since now recursive calls are handled correctly.
Fix the test that got the number of expected fib instances and also the
function numbers wrong.
The current instruction had been part of the branch trace. This will look odd
once we start support for reverse execution. Remove it. We still keep it in
the trace itself to allow extending the branch trace more easily in the future.
2014-01-16 Markus Metzger <markus.t.metzger@intel.com>
* btrace.h (struct btrace_func_link): New.
(enum btrace_function_flag): New.
(struct btrace_inst): Rename to ...
(struct btrace_insn): ...this. Update all users.
(struct btrace_func) <ibegin, iend>: Remove.
(struct btrace_func_link): New.
(struct btrace_func): Rename to ...
(struct btrace_function): ...this. Update all users.
(struct btrace_function) <segment, flow, up, insn, insn_offset)
(number, level, flags>: New.
(struct btrace_insn_iterator): Rename to ...
(struct btrace_insn_history): ...this.
Update all users.
(struct btrace_insn_iterator, btrace_call_iterator): New.
(struct btrace_target_info) <btrace, itrace, ftrace>: Remove.
(struct btrace_target_info) <begin, end, level>
<insn_history, call_history>: New.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* btrace.c (btrace_init_insn_iterator)
(btrace_init_func_iterator, compute_itrace): Remove.
(ftrace_print_function_name, ftrace_print_filename)
(ftrace_skip_file): Change
parameter to const.
(ftrace_init_func): Remove.
(ftrace_debug): Use new btrace_function fields.
(ftrace_function_switched): Also consider gaining and
losing symbol information).
(ftrace_print_insn_addr, ftrace_new_call, ftrace_new_return)
(ftrace_new_switch, ftrace_find_caller, ftrace_new_function)
(ftrace_update_caller, ftrace_fixup_caller, ftrace_new_tailcall):
New.
(ftrace_new_function): Move. Remove debug print.
(ftrace_update_lines, ftrace_update_insns): New.
(ftrace_update_function): Check for call, ret, and jump.
(compute_ftrace): Renamed to ...
(btrace_compute_ftrace): ...this. Rewritten to compute call
stack.
(btrace_fetch, btrace_clear): Updated.
(btrace_insn_get, btrace_insn_number, btrace_insn_begin)
(btrace_insn_end, btrace_insn_prev, btrace_insn_next)
(btrace_insn_cmp, btrace_find_insn_by_number, btrace_call_get)
(btrace_call_number, btrace_call_begin, btrace_call_end)
(btrace_call_prev, btrace_call_next, btrace_call_cmp)
(btrace_find_function_by_number, btrace_set_insn_history)
(btrace_set_call_history): New.
* record-btrace.c (require_btrace): Use new btrace thread
info fields.
(record_btrace_info, btrace_insn_history)
(record_btrace_insn_history, record_btrace_insn_history_range):
Use new btrace thread info fields and new iterator.
(btrace_func_history_src_line): Rename to ...
(btrace_call_history_src_line): ...this. Use new btrace
thread info fields.
(btrace_func_history): Rename to ...
(btrace_call_history): ...this. Use new btrace thread info
fields and new iterator.
(record_btrace_call_history, record_btrace_call_history_range):
Use new btrace thread info fields and new iterator.
testsuite/
* gdb.btrace/function_call_history.exp: Fix expected function
trace.
* gdb.btrace/instruction_history.exp: Initialize traced.
Remove traced_functions.
2013-03-22 21:32:47 +08:00
|
|
|
|
record-btrace: indicate gaps
Indicate gaps in the trace due to decode errors. Internally, a gap is
represented as a btrace function segment without instructions and with a
non-zero format-specific error code.
Show the gap when traversing the instruction or function call history.
Also indicate gaps in "info record".
It looks like this:
(gdb) info record
Active record target: record-btrace
Recording format: Branch Trace Store.
Buffer size: 64KB.
Recorded 32 instructions in 5 functions (1 gaps) for thread 1 (process 7182).
(gdb) record function-call-history /cli
1 fib inst 1,9 at src/fib.c:9,14
2 fib inst 10,20 at src/fib.c:6,14
3 [decode error (1): instruction overflow]
4 fib inst 21,28 at src/fib.c:11,14
5 fib inst 29,33 at src/fib.c:6,9
(gdb) record instruction-history 20,22
20 0x000000000040062f <fib+47>: sub $0x1,%rax
[decode error (1): instruction overflow]
21 0x0000000000400613 <fib+19>: add $0x1,%rax
22 0x0000000000400617 <fib+23>: mov %rax,0x200a3a(%rip)
(gdb)
Gaps are ignored during reverse execution and replay.
2015-02-09 Markus Metzger <markus.t.metzger@intel.com>
* btrace.c (ftrace_find_call): Skip gaps.
(ftrace_new_function): Initialize level.
(ftrace_new_call, ftrace_new_tailcall, ftrace_new_return)
(ftrace_new_switch): Update level computation.
(ftrace_new_gap): New.
(ftrace_update_function): Create new function after gap.
(btrace_compute_ftrace_bts): Create gap on error.
(btrace_stitch_bts): Update parameters. Clear trace if it
becomes empty.
(btrace_stitch_trace): Update parameters. Update callers.
(btrace_clear): Reset the number of gaps.
(btrace_insn_get): Return NULL if the iterator points to a gap.
(btrace_insn_number): Return zero if the iterator points to a gap.
(btrace_insn_end): Allow gaps at the end.
(btrace_insn_next, btrace_insn_prev, btrace_insn_cmp): Handle gaps.
(btrace_find_insn_by_number): Assert that the found iterator does
not point to a gap.
(btrace_call_next, btrace_call_prev): Assert that the last function
is not a gap.
* btrace.h (btrace_bts_error): New.
(btrace_function): Update comment.
(btrace_function) <insn, insn_offset, number>: Update comment.
(btrace_function) <errcode>: New.
(btrace_thread_info) <ngaps>: New.
(btrace_thread_info) <replay>: Update comment.
(btrace_insn_get): Update comment.
* record-btrace.c (btrace_ui_out_decode_error): New.
(record_btrace_info): Print number of gaps.
(btrace_insn_history, btrace_call_history): Call
btrace_ui_out_decode_error for gaps.
(record_btrace_step_thread, record_btrace_start_replaying): Skip gaps.
testsuite/
* gdb.btrace/buffer-size.exp: Update "info record" output.
* gdb.btrace/delta.exp: Update "info record" output.
* gdb.btrace/enable.exp: Update "info record" output.
* gdb.btrace/finish.exp: Update "info record" output.
* gdb.btrace/instruction_history.exp: Update "info record" output.
* gdb.btrace/next.exp: Update "info record" output.
* gdb.btrace/nexti.exp: Update "info record" output.
* gdb.btrace/step.exp: Update "info record" output.
* gdb.btrace/stepi.exp: Update "info record" output.
* gdb.btrace/nohist.exp: Update "info record" output.
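As a rough sketch of the representation described above (illustrative only; the struct and field names below are invented for the example and are not the gdb/btrace.h declarations), a gap can be modelled as a function segment whose instruction vector is empty and whose error code is non-zero, and such a segment still occupies one slot when the trace is numbered:

#include <cstdio>
#include <vector>

/* Stand-in for a btrace function segment: a gap carries no instructions
   and a non-zero, format-specific error code.  */
struct segment
{
  std::vector<unsigned long> insns;   /* Instruction addresses.  */
  int errcode;                        /* Non-zero marks a gap.  */
};

static bool
is_gap (const segment &seg)
{
  return seg.insns.empty () && seg.errcode != 0;
}

int
main ()
{
  std::vector<segment> trace
    = { { { 0x400613, 0x400617 }, 0 },   /* Ordinary segment.  */
        { { }, 1 },                      /* Decode error (1): a gap.  */
        { { 0x40062f }, 0 } };

  unsigned int nslots = 0, ngaps = 0;
  for (const segment &seg : trace)
    {
      if (is_gap (seg))
        {
          nslots += 1;   /* A gap occupies one slot in the numbering.  */
          ngaps += 1;
        }
      else
        nslots += seg.insns.size ();
    }

  std::printf ("%u instruction slots, %u gaps\n", nslots, ngaps);
  return 0;
}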
  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = length;
}

/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

btrace: Store btrace_insn in an std::vector
Because it contains a non-POD type field (flags), the type btrace_insn
should be new'ed/delete'd. Replace the VEC (btrace_insn_s) in
btrace_function with an std::vector.
gdb/ChangeLog:
* btrace.h (btrace_insn_s, DEF_VEC_O (btrace_insn_s)): Remove.
(btrace_function) <insn>: Change type to use std::vector.
* btrace.c (ftrace_debug, ftrace_call_num_insn,
ftrace_find_call, ftrace_new_gap, ftrace_update_function,
ftrace_update_insns, ftrace_compute_global_level_offset,
btrace_stitch_bts, btrace_clear, btrace_insn_get,
btrace_insn_end, btrace_insn_next, btrace_insn_prev): Adjust to
change to std::vector.
(ftrace_update_insns): Adjust to change to std::vector, change
type of INSN parameter.
(btrace_compute_ftrace_bts): Adjust call to ftrace_update_insns.
* record-btrace.c (btrace_call_history_insn_range,
btrace_compute_src_line_range,
record_btrace_frame_prev_register): Adjust to change to
std::vector.
* python/py-record-btrace.c (recpy_bt_func_instructions): Adjust
to change to std::vector.
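As a minimal sketch of the motivation (illustrative only; flag_set below stands in for the non-POD flags member and is not the real btrace_insn layout), a struct with a non-trivially-constructible member cannot safely live in a realloc-based container such as the old VEC, while std::vector constructs and destroys its elements properly:

#include <vector>

struct flag_set
{
  flag_set () : bits (0) {}   /* User-provided ctor: the type is non-POD.  */
  unsigned int bits;
};

struct insn
{
  unsigned long pc;
  unsigned char size;
  flag_set flags;   /* Non-POD member; memcpy/realloc-style containers
                       (like the old VEC) would bypass its constructor.  */
};

int
main ()
{
  std::vector<insn> insns;   /* Replaces VEC (btrace_insn_s) in the model.  */

  insn i;
  i.pc = 0x400613;
  i.size = 4;
  insns.push_back (i);

  return insns.size () == 1 ? 0 : 1;
}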
      end = bfun->insn.size ();

      /* An empty function segment represents a gap in the trace.  We count
         it as one instruction.  */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
gdb: Use std::min and std::max throughout
Otherwise including <string> or some other C++ header is broken.
E.g.:
In file included from /opt/gcc/include/c++/7.0.0/bits/char_traits.h:39:0,
from /opt/gcc/include/c++/7.0.0/string:40,
from /home/pedro/gdb/mygit/cxx-convertion/src/gdb/infrun.c:68:
/opt/gcc/include/c++/7.0.0/bits/stl_algobase.h:243:56: error: macro "min" passed 3 arguments, but takes just 2
min(const _Tp& __a, const _Tp& __b, _Compare __comp)
^
/opt/gcc/include/c++/7.0.0/bits/stl_algobase.h:265:56: error: macro "max" passed 3 arguments, but takes just 2
max(const _Tp& __a, const _Tp& __b, _Compare __comp)
^
In file included from .../src/gdb/infrun.c:21:0:
To the best of my grepping abilities, I believe I adjusted all min/max
calls.
gdb/ChangeLog:
2016-09-16 Pedro Alves <palves@redhat.com>
* defs.h (min, max): Delete.
* aarch64-tdep.c: Include <algorithm> and use std::min and
std::max throughout.
* aarch64-tdep.c: Likewise.
* alpha-tdep.c: Likewise.
* amd64-tdep.c: Likewise.
* amd64-windows-tdep.c: Likewise.
* arm-tdep.c: Likewise.
* avr-tdep.c: Likewise.
* breakpoint.c: Likewise.
* btrace.c: Likewise.
* ctf.c: Likewise.
* disasm.c: Likewise.
* doublest.c: Likewise.
* dwarf2loc.c: Likewise.
* dwarf2read.c: Likewise.
* environ.c: Likewise.
* exec.c: Likewise.
* f-exp.y: Likewise.
* findcmd.c: Likewise.
* ft32-tdep.c: Likewise.
* gcore.c: Likewise.
* hppa-tdep.c: Likewise.
* i386-darwin-tdep.c: Likewise.
* i386-tdep.c: Likewise.
* linux-thread-db.c: Likewise.
* lm32-tdep.c: Likewise.
* m32r-tdep.c: Likewise.
* m88k-tdep.c: Likewise.
* memrange.c: Likewise.
* minidebug.c: Likewise.
* mips-tdep.c: Likewise.
* moxie-tdep.c: Likewise.
* nds32-tdep.c: Likewise.
* nios2-tdep.c: Likewise.
* nto-procfs.c: Likewise.
* parse.c: Likewise.
* ppc-sysv-tdep.c: Likewise.
* probe.c: Likewise.
* record-btrace.c: Likewise.
* remote.c: Likewise.
* rs6000-tdep.c: Likewise.
* rx-tdep.c: Likewise.
* s390-linux-nat.c: Likewise.
* s390-linux-tdep.c: Likewise.
* ser-tcp.c: Likewise.
* sh-tdep.c: Likewise.
* sh64-tdep.c: Likewise.
* source.c: Likewise.
* sparc-tdep.c: Likewise.
* symfile.c: Likewise.
* target-memory.c: Likewise.
* target.c: Likewise.
* tic6x-tdep.c: Likewise.
* tilegx-tdep.c: Likewise.
* tracefile-tfile.c: Likewise.
* tracepoint.c: Likewise.
* valprint.c: Likewise.
* value.c: Likewise.
* xtensa-tdep.c: Likewise.
* cli/cli-cmds.c: Likewise.
* compile/compile-object-load.c: Likewise.
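A small illustration of the conflict (a hypothetical translation unit, not GDB source): a function-like min macro defined before the standard headers breaks the three-argument std::min overload quoted in the error above, which is why call sites now use std::min and std::max from <algorithm> directly:

#include <algorithm>
#include <cstdio>

/* #define min(a, b) ((a) < (b) ? (a) : (b))
   With a macro like this defined before <algorithm> (or before any header
   that pulls it in, such as <string>), the three-parameter std::min
   overload fails with "macro passed 3 arguments, but takes just 2".  */

int
main ()
{
  unsigned int space = 12, stride = 5;
  unsigned int adv = std::min (space, stride);   /* Arguments share one type.  */

  std::printf ("adv = %u\n", adv);
  return 0;
}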
      adv = std::min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
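For orientation, the stride-advance logic of btrace_insn_next above can be restated on a much smaller model. This is an illustrative sketch under simplified assumptions: segment, insn_iter and advance are invented names, a segment is reduced to a bare instruction count, and the lookup by function number is replaced by a plain index increment; none of this is the actual GDB iterator implementation.

#include <algorithm>
#include <cstdio>
#include <vector>

struct segment
{
  unsigned int ninsns;          /* Number of instructions; zero for a gap.  */
};

struct insn_iter
{
  const std::vector<segment> *trace;
  unsigned int call_index;      /* Index of the current segment.  */
  unsigned int insn_index;      /* Index within that segment.  */
};

/* Advance IT by at most STRIDE instructions and return the steps taken.
   As in btrace_insn_next, a gap counts as a single step and the iterator
   stops on the last reachable position.  */
static unsigned int
advance (insn_iter *it, unsigned int stride)
{
  unsigned int steps = 0;
  unsigned int call = it->call_index;
  unsigned int index = it->insn_index;

  while (stride != 0)
    {
      unsigned int end = (*it->trace)[call].ninsns;

      /* An empty segment (a gap) is counted as one instruction.  */
      if (end == 0)
        {
          if (call + 1 == it->trace->size ())
            break;

          stride -= 1;
          steps += 1;
          call += 1;
          index = 0;
          continue;
        }

      /* Advance as far as possible within this segment.  */
      unsigned int space = end - index;
      unsigned int adv = std::min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move on, or stop at the last instruction of the last segment.  */
      if (index == end)
        {
          if (call + 1 == it->trace->size ())
            {
              index -= 1;
              steps -= 1;
              break;
            }

          call += 1;
          index = 0;
        }
    }

  it->call_index = call;
  it->insn_index = index;
  return steps;
}

int
main ()
{
  /* Three segments: 5 instructions, a gap, 3 instructions.  */
  std::vector<segment> trace = { { 5 }, { 0 }, { 3 } };
  insn_iter it = { &trace, 0, 0 };

  unsigned int steps = advance (&it, 7);
  std::printf ("moved %u steps to segment %u, insn %u\n",
               steps, it.call_index, it.insn_index);
  return 0;
}
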
/* See btrace.h. */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new function.  */
          bfun = prev;
          index = bfun->insn.size ();

          /* An empty function segment represents a gap in the trace.  We count
             it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
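
/* Illustrative sketch (not built into GDB): assuming the routine above is
   the iterator decrement declared in btrace.h as btrace_insn_prev, and
   assuming the related iterator helpers btrace_insn_end and btrace_insn_get
   with the signatures suggested there, a caller could walk the recorded
   history backwards like this.  The helper name count_gaps_backwards is
   hypothetical.  */
#if 0
static unsigned int
count_gaps_backwards (const struct btrace_thread_info *btinfo)
{
  struct btrace_insn_iterator it;
  unsigned int gaps = 0;

  /* Position the iterator one past the last recorded instruction.  */
  btrace_insn_end (&it, btinfo);

  /* Step back one instruction at a time; a return value of zero means
     the beginning of the trace has been reached.  */
  while (btrace_insn_prev (&it, 1) != 0)
    {
      /* A NULL instruction marks a gap (decode error) in the trace.  */
      if (btrace_insn_get (&it) == NULL)
        gaps += 1;
    }

  return gaps;
}
#endif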

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);

  if (lhs->call_index != rhs->call_index)
    return lhs->call_index - rhs->call_index;

  return lhs->insn_index - rhs->insn_index;
}
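
/* Illustrative sketch (not built into GDB): btrace_insn_cmp above orders
   two iterators into the same trace in the usual strcmp style, so a
   half-open range check can be written as follows.  The helper name
   insn_in_range is hypothetical; BEGIN, END and POS are assumed to be
   valid iterators into the same thread's trace.  */
#if 0
static int
insn_in_range (const struct btrace_insn_iterator *pos,
               const struct btrace_insn_iterator *begin,
               const struct btrace_insn_iterator *end)
{
  /* POS lies in [BEGIN, END) if it is not before BEGIN and is strictly
     before END.  */
  return (btrace_insn_cmp (pos, begin) >= 0
          && btrace_insn_cmp (pos, end) < 0);
}
#endif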

/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  if (btinfo->functions.empty ())
    return 0;

  lower = 0;
  bfun = &btinfo->functions[lower];
  if (number < bfun->insn_offset)
    return 0;

  upper = btinfo->functions.size () - 1;
  bfun = &btinfo->functions[upper];
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = &btinfo->functions[average];

      if (number < bfun->insn_offset)
        {
          upper = average - 1;
          continue;
        }

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
        {
          lower = average + 1;
          continue;
        }

      break;
    }

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = number - bfun->insn_offset;
  return 1;
}
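
/* A usage sketch, not part of GDB: the helper name below is made up for
   illustration.  It maps a global instruction number back to the function
   segment that contains it, relying on the iterator fields filled in by
   btrace_find_insn_by_number above (CALL_INDEX is the zero-based segment
   index, INSN_INDEX the offset within that segment) and assuming the
   declaration of btrace_find_insn_by_number from btrace.h.  Returns NULL
   if NUMBER lies outside the trace.  */

static const struct btrace_function *
example_segment_for_insn (const struct btrace_thread_info *btinfo,
                          unsigned int number)
{
  struct btrace_insn_iterator it;

  if (btrace_find_insn_by_number (&it, btinfo, number) == 0)
    return NULL;

  /* The segment's insn_offset plus INSN_INDEX gives back NUMBER.  */
  return &btinfo->functions[it.call_index];
}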

/* Returns true if the recording ends with a function segment that
   contains only a single (i.e. the current) instruction.  */

static bool
btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
{
  const btrace_function *bfun;

  if (btinfo->functions.empty ())
    return false;

  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return false;

  return ftrace_call_num_insn (bfun) == 1;
}

/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  if (it->index >= it->btinfo->functions.size ())
    return NULL;

  return &it->btinfo->functions[it->index];
}

/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const unsigned int length = it->btinfo->functions.size ();

  /* If the last function segment contains only a single instruction (i.e. the
     current instruction), skip it.  */
  if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
    return length;

  return it->index + 1;
}
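
/* A numbering sketch, not part of btrace.c: the helper name is made up for
   illustration.  Call numbers are 1-based, so the begin iterator reports
   number 1 and the end iterator reports one past the last segment that is
   actually listed (the lone current-instruction segment, if any, is skipped
   as above).  The sketch assumes btrace_call_begin and btrace_call_end as
   declared in btrace.h and defined below.  */

static void
example_call_number_range (const struct btrace_thread_info *btinfo,
                           unsigned int *first, unsigned int *last)
{
  struct btrace_call_iterator begin, end;

  btrace_call_begin (&begin, btinfo);
  btrace_call_end (&end, btinfo);

  /* The last listed segment's number is one below the end iterator's
     number.  */
  *first = btrace_call_number (&begin);
  *last = btrace_call_number (&end) - 1;
}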

/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = btinfo->functions.size ();
}
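
/* An iteration sketch, not part of btrace.c: the helper name is made up for
   illustration.  It walks the recorded call segments from oldest to newest
   using the begin/get/next iterator functions declared in btrace.h.  The
   sketch assumes that btrace_call_next, defined below, returns the number of
   segments the iterator actually advanced, so a return of zero means the end
   of the trace has been reached.  */

static unsigned int
example_count_call_segments (const struct btrace_thread_info *btinfo)
{
  struct btrace_call_iterator it;
  unsigned int count = 0;

  btrace_call_begin (&it, btinfo);

  while (btrace_call_get (&it) != NULL)
    {
      count += 1;

      if (btrace_call_next (&it, 1) == 0)
        break;
    }

  return count;
}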

/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();
if (it->index + stride < length - 1)
|
|
|
|
/* Default case: Simply advance the iterator. */
|
|
|
|
it->index += stride;
|
|
|
|
else if (it->index + stride == length - 1)
    {
      /* We land exactly at the last function segment.  If it contains only one
         instruction (i.e. the current instruction) it is not actually part of
         the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
        it->index = length;
      else
        it->index = length - 1;
    }
  else
    {
      /* We land past the last function segment and have to adjust the stride.
         If the last function segment contains only one instruction (i.e. the
         current instruction) it is not actually part of the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
        stride = length - it->index - 1;
      else
        stride = length - it->index;

      it->index = length;
    }

  return stride;
}
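
/* Illustrative usage sketch, not part of GDB proper: callers typically pair
   btrace_call_next with an end iterator obtained from btrace_call_end and
   compare via btrace_call_cmp, along the lines of

     struct btrace_call_iterator it, end;

     btrace_call_begin (&it, btinfo);
     btrace_call_end (&end, btinfo);

     while (btrace_call_cmp (&it, &end) < 0)
       {
         const struct btrace_function *bfun = btrace_call_get (&it);

         ... process BFUN ...

         if (btrace_call_next (&it, 1) == 0)
           break;
       }

   The helper signatures above are assumed from their declarations in
   btrace.h.  The return value of btrace_call_next is the number of function
   segments actually advanced, which may be smaller than the requested
   stride near the end of the trace.  */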

/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();
  int steps = 0;

  gdb_assert (it->index <= length);

  if (stride == 0 || it->index == 0)
    return 0;

  /* If we are at the end, the first step is a special case.  If the last
     function segment contains only one instruction (i.e. the current
     instruction) it is not actually part of the trace.  To be able to step
     over this instruction, we need at least one more function segment.  */
  if ((it->index == length) && (length > 1))
    {
      if (btrace_ends_with_single_insn (it->btinfo))
        it->index = length - 2;
      else
        it->index = length - 1;

      steps = 1;
      stride -= 1;
    }

  stride = std::min (stride, it->index);

  it->index -= stride;
  return steps + stride;
}
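
/* Illustrative usage sketch, not part of GDB proper: walking backwards from
   the end of the trace relies on btrace_call_prev returning the number of
   segments actually stepped, so a loop can simply stop at zero:

     struct btrace_call_iterator it;

     btrace_call_end (&it, btinfo);
     while (btrace_call_prev (&it, 1) != 0)
       {
         ... visit the call segment at IT ...
       }

   The STEPS bookkeeping above accounts for the special first step taken when
   the iterator starts one past the last recorded function segment.  */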

/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);
  return (int) (lhs->index - rhs->index);
}
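
/* Editorial note: since both iterators index the same function vector, the
   difference of the two indices yields a strcmp-style result: negative if
   LHS denotes an earlier call than RHS, zero if both denote the same call,
   and positive otherwise.  */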

/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const unsigned int length = btinfo->functions.size ();
|

  if ((number == 0) || (number > length))
    return 0;

  it->btinfo = btinfo;
  it->index = number - 1;
  return 1;
}
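
/* Illustrative usage sketch (not taken from this file): a caller that
   wants to start iterating at a particular call segment can pair this
   function with the call iterator routines declared in btrace.h:

     struct btrace_call_iterator it;

     if (btrace_find_call_by_number (&it, &tp->btrace, number))
       btrace_call_next (&it, 1);

   Here TP and NUMBER are placeholders; NUMBER is 1-based while the
   resulting IT->index is 0-based, matching the BTINFO->functions
   vector.  */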

/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);
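  /* XCNEW comes from libiberty and zero-initializes the allocation, so
     a freshly created history is fully cleared before the requested
     range is stored below.  */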

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
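
/* The stored range lets the history commands in record-btrace.c pick up
   where the previous listing stopped.  Illustrative session (assumed
   command behavior, not taken from this file):

     (gdb) record instruction-history 1,10
     (gdb) record instruction-history

   where the second, argument-less command is expected to continue after
   instruction 10.  */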

/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
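
/* Illustrative calling pattern (a sketch, not code from this file):
   selecting the whole call history would pair the iterator helpers
   defined earlier in this file with this setter:

     struct btrace_call_iterator begin, end;

     btrace_call_begin (&begin, btinfo);
     btrace_call_end (&end, btinfo);
     btrace_set_call_history (btinfo, &begin, &end);  */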

/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}
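
/* The replay iterator is non-NULL only while the thread is being
   replayed from its recorded trace, e.g. after "record goto" or a
   reverse execution command; record-btrace.c sets and clears it.  */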

/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->functions.empty ())
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}
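
/* Both checks above are needed: an empty function vector means nothing
   was traced at all, while equal begin and end iterators catch a trace
   whose function segments contain no instructions to show.  */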

#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
                         packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
        {
        default:
          printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
          break;

        case pt_mol_exec:
          printf_unfiltered (("mode.exec%s%s"),
                             packet->payload.mode.bits.exec.csl
                             ? (" cs.l") : (""),
                             packet->payload.mode.bits.exec.csd
                             ? (" cs.d") : (""));
          break;

        case pt_mol_tsx:
          printf_unfiltered (("mode.tsx%s%s"),
                             packet->payload.mode.bits.tsx.intx
                             ? (" intx") : (""),
                             packet->payload.mode.bits.tsx.abrt
                             ? (" abrt") : (""));
          break;
        }
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
                         packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}

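/* A single branch trace buffer may contain more than one synchronization
   point.  The decode loop below synchronizes onto the next PSB packet with
   pt_pkt_sync_forward and then reads packets with pt_pkt_next until the end
   of the trace or a decode error, so each outer iteration covers one
   synchronized segment.  */
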
/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
                        struct pt_packet_decoder *decoder)
{
  int errcode;

  if (maint->variant.pt.packets == NULL)
    maint->variant.pt.packets = new std::vector<btrace_pt_packet>;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
        break;

      for (;;)
        {
          pt_pkt_get_offset (decoder, &packet.offset);

          errcode = pt_pkt_next (decoder, &packet.packet,
                                 sizeof(packet.packet));
          if (errcode < 0)
            break;

          if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
            {
              packet.errcode = pt_errcode (errcode);
              maint->variant.pt.packets->push_back (packet);
            }
        }

      if (errcode == -pte_eos)
        break;

      packet.errcode = pt_errcode (errcode);
      maint->variant.pt.packets->push_back (packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
               packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
               "stream: %s."), pt_errstr (pt_errcode (errcode)));
}

/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  struct pt_packet_decoder *decoder;
  const struct btrace_cpu *cpu;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  cpu = record_btrace_get_cpu ();
  if (cpu == nullptr)
    cpu = &pt->config.cpu;

  /* We treat an unknown vendor as 'no errata'.  */
  if (cpu->vendor != CV_UNKNOWN)
    {
      config.cpu.vendor = pt_translate_cpu_vendor (cpu->vendor);
      config.cpu.family = cpu->family;
      config.cpu.model = cpu->model;
      config.cpu.stepping = cpu->stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace "
                 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  try
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  catch (const gdb_exception &except)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
        throw;
    }

  pt_pkt_free_decoder (decoder);
}

#endif /* !defined (HAVE_LIBIPT) */

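/* The BTS format is handled directly from the raw block vector in
   BTINFO->DATA, whereas the Intel PT format keeps a separately decoded
   packet vector that is filled in on first use by
   btrace_maint_update_pt_packets.  */
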
/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
                             unsigned int *begin, unsigned int *end,
                             unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = btinfo->data.variant.bts.blocks->size ();
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (btinfo->maint.variant.pt.packets == nullptr)
        btinfo->maint.variant.pt.packets = new std::vector<btrace_pt_packet>;

      if (btinfo->maint.variant.pt.packets->empty ())
        btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = btinfo->maint.variant.pt.packets->size ();
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

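/* The bounds printed below are also stored as the new packet_history, so
   btrace_maint_update_packets will report them as the current iterator
   position for the next packet-history request.  */
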
/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
                            unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
        const std::vector<btrace_block> &blocks
          = *btinfo->data.variant.bts.blocks;
        unsigned int blk;

        for (blk = begin; blk < end; ++blk)
          {
            const btrace_block &block = blocks.at (blk);

            printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
                               core_addr_to_string_nz (block.begin),
                               core_addr_to_string_nz (block.end));
          }

        btinfo->maint.variant.bts.packet_history.begin = begin;
        btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        const std::vector<btrace_pt_packet> &packets
          = *btinfo->maint.variant.pt.packets;
        unsigned int pkt;

        for (pkt = begin; pkt < end; ++pkt)
          {
            const struct btrace_pt_packet &packet = packets.at (pkt);

            printf_unfiltered ("%u\t", pkt);
            printf_unfiltered ("0x%" PRIx64 "\t", packet.offset);

            if (packet.errcode == pte_ok)
              pt_print_packet (&packet.packet);
            else
              printf_unfiltered ("[error: %s]", pt_errstr (packet.errcode));

            printf_unfiltered ("\n");
          }

        btinfo->maint.variant.pt.packet_history.begin = begin;
        btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

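/* Illustrative sketch (not from the original file): get_uint below skips
   leading spaces and advances *ARG past the digits it parsed, so a caller
   can read several numbers from one argument string:

     const char *arg = "10 20";
     unsigned int from = get_uint (&arg);   // arg now points at " 20"
     unsigned int to = get_uint (&arg);     // arg is now empty

   Anything other than an optionally space-prefixed digit is rejected with
   an error.  */
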
/* Read a number from an argument string. */
|
|
|
|
|
|
|
|
static unsigned int
|
2017-09-13 11:12:42 +08:00
|
|
|
get_uint (const char **arg)
|
btrace: maintenance commands
Add maintenance commands that help debugging the btrace record target.
The following new commands are added:
maint info btrace
Print information about branch tracing internals.
maint btrace packet-history
Print the raw branch tracing data.
maint btrace clear-packet-history
Discard the stored raw branch tracing data.
maint btrace clear
Discard all branch tracing data. It will be fetched and processed
anew by the next "record" command.
maint set|show btrace pt skip-pad
Set and show whether PAD packets are skipped when computing the
packet history.
gdb/
* btrace.c: Include gdbcmd.h, cli/cli-utils.h, and ctype.h.
(maint_btrace_cmdlist, maint_btrace_set_cmdlist)
(maint_btrace_show_cmdlist, maint_btrace_pt_set_cmdlist)
(maint_btrace_pt_show_cmdlist, maint_btrace_pt_skip_pad)
(btrace_maint_clear): New.
(btrace_fetch, btrace_clear): Call btrace_maint_clear.
(pt_print_packet, btrace_maint_decode_pt)
(btrace_maint_update_pt_packets, btrace_maint_update_packets)
(btrace_maint_print_packets, get_uint, get_context_size, no_chunk)
(maint_btrace_packet_history_cmd)
(maint_btrace_clear_packet_history_cmd, maint_btrace_clear_cmd)
(maint_btrace_cmd, maint_btrace_set_cmd, maint_btrace_show_cmd)
(maint_btrace_pt_set_cmd, maint_btrace_pt_show_cmd)
(maint_info_btrace_cmd, _initialize_btrace): New.
* btrace.h (btrace_pt_packet, btrace_pt_packet_s)
(btrace_maint_packet_history, btrace_maint_info): New.
(btrace_thread_info) <maint>: New.
* NEWS: Announce it.
doc/
* gdb.texinfo (Maintenance Commands): Document "maint btrace"
commands.
2014-02-03 21:35:28 +08:00
|
|
|
{
|
2017-09-13 11:12:42 +08:00
|
|
|
const char *begin, *pos;
|
|
|
|
char *end;
|
btrace: maintenance commands
Add maintenance commands that help debugging the btrace record target.
The following new commands are added:
maint info btrace
Print information about branch tracing internals.
maint btrace packet-history
Print the raw branch tracing data.
maint btrace clear-packet-history
Discard the stored raw branch tracing data.
maint btrace clear
Discard all branch tracing data. It will be fetched and processed
anew by the next "record" command.
maint set|show btrace pt skip-pad
Set and show whether PAD packets are skipped when computing the
packet history.
gdb/
* btrace.c: Include gdbcmd.h, cli/cli-utils.h, and ctype.h.
(maint_btrace_cmdlist, maint_btrace_set_cmdlist)
(maint_btrace_show_cmdlist, maint_btrace_pt_set_cmdlist)
(maint_btrace_pt_show_cmdlist, maint_btrace_pt_skip_pad)
(btrace_maint_clear): New.
(btrace_fetch, btrace_clear): Call btrace_maint_clear.
(pt_print_packet, btrace_maint_decode_pt)
(btrace_maint_update_pt_packets, btrace_maint_update_packets)
(btrace_maint_print_packets, get_uint, get_context_size, no_chunk)
(maint_btrace_packet_history_cmd)
(maint_btrace_clear_packet_history_cmd, maint_btrace_clear_cmd)
(maint_btrace_cmd, maint_btrace_set_cmd, maint_btrace_show_cmd)
(maint_btrace_pt_set_cmd, maint_btrace_pt_show_cmd)
(maint_info_btrace_cmd, _initialize_btrace): New.
* btrace.h (btrace_pt_packet, btrace_pt_packet_s)
(btrace_maint_packet_history, btrace_maint_info): New.
(btrace_thread_info) <maint>: New.
* NEWS: Announce it.
doc/
* gdb.texinfo (Maintenance Commands): Document "maint btrace"
commands.
2014-02-03 21:35:28 +08:00
|
|
|
unsigned long number;
|
|
|
|
|
|
|
|
begin = *arg;
|
|
|
|
pos = skip_spaces (begin);
|
|
|
|
|
|
|
|
if (!isdigit (*pos))
|
|
|
|
error (_("Expected positive number, got: %s."), pos);
|
|
|
|
|
|
|
|
number = strtoul (pos, &end, 10);
|
|
|
|
if (number > UINT_MAX)
|
|
|
|
error (_("Number too big."));
|
|
|
|
|
|
|
|
*arg += (end - begin);
|
|
|
|
|
|
|
|
return (unsigned int) number;
|
|
|
|
}
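
/* For example, with *ARG pointing at "  42, +10", get_uint skips the leading
   whitespace, returns 42, and advances *ARG to ", +10", leaving the rest of
   the range specification for the caller.  */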

/* Read a context size from an argument string.  */

static int
get_context_size (const char **arg)
{
  const char *pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  char *end;
  long result = strtol (pos, &end, 10);
  *arg = end;
  return result;
}
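
/* For example, get_context_size on "20" returns 20 and leaves *ARG at the
   terminating NUL; on "20junk" it returns 20 and leaves *ARG at "junk",
   which callers then reject via no_chunk.  */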

/* Complain about junk at the end of an argument string.  */

static void
no_chunk (const char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
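
/* For example, trailing text such as "5,+10 extra" ends up here once the
   leading "5,+10" has been consumed, and is rejected with an error.  */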

/* The "maintenance btrace packet-history" command.  */

static void
maint_btrace_packet_history_cmd (const char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  unsigned int size, begin, end, from, to;

  thread_info *tp = find_thread_ptid (current_inferior (), inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  size = 10;
  btinfo = &tp->btrace;

  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }
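
  /* The argument (parsed below) selects which packets to show: no argument
     or "+" continues forward from the last shown range, "-" steps backwards,
     and "<begin>[,<end>|,+<size>|,-<size>]" names an explicit range.  */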
  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      from = to;

      if (end - from < size)
        size = end - from;
      to = from + size;
    }
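  /* "-" steps backwards: show up to SIZE packets immediately preceding the
     previously shown range.  */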
  else if (strcmp (arg, "-") == 0)
    {
      to = from;

      if (to - begin < size)
        size = to - begin;
      from = to - size;
    }
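  /* An explicit range: a begin packet number, optionally followed by
     ",<end>", ",+<size>" or ",-<size>".  */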
  else
    {
      from = get_uint (&arg);
      if (end <= from)
        error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
        {
          arg = skip_spaces (++arg);

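          /* ",+<size>": show up to <size> packets starting at the packet
             given as the first argument.  */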
          if (*arg == '+')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              if (end - from < size)
                size = end - from;
              to = from + size;
            }
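          /* ",-<size>": show up to <size> packets ending with, and
             including, the packet given as the first argument.  */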
          else if (*arg == '-')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              /* Include the packet given as first argument.  */
              from += 1;
              to = from;

              if (to - begin < size)
                size = to - begin;
              from = to - size;
            }
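          /* ",<end>": an explicit end packet; the range is truncated to the
             available trace.  */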
          else
            {
              to = get_uint (&arg);

              /* Include the packet at the second argument and silently
                 truncate the range.  */
              if (to < end)
                to += 1;
              else
                to = end;

              no_chunk (arg);
            }
        }
      else
        {
          no_chunk (arg);

          if (end - from < size)
            size = end - from;
          to = from + size;
        }

      dont_repeat ();
    }

  btrace_maint_print_packets (btinfo, from, to);
}
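
/* Illustrative usage:

     (gdb) maint btrace packet-history          <- next 10 packets
     (gdb) maint btrace packet-history -        <- previous 10 packets
     (gdb) maint btrace packet-history 100,+20  <- 20 packets starting at 100

   The default context size of 10 comes from the "size = 10" assignment
   above.  */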

/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));
pointer.
(thread_stack_temporaries_enabled_p, push_thread_stack_temporary)
(value_in_thread_stack_temporaries)
(get_last_thread_stack_temporary): Take a thread pointer instead
of a ptid_t. Adjust all callers.
(thread_info::set_running): New.
(validate_registers_access): Use inferior_thread.
(can_access_registers_ptid): Rename to ...
(can_access_registers_thread): ... this, and take a thread
pointer.
(print_thread_info_1): Adjust to compare thread pointers instead
of ptids.
(switch_to_no_thread, switch_to_thread): Make extern.
(scoped_restore_current_thread::~scoped_restore_current_thread):
Use m_thread pointer directly.
(scoped_restore_current_thread::scoped_restore_current_thread):
Use inferior_thread.
(thread_command): Use thread pointer directly.
(thread_num_make_value_helper): Use inferior_thread.
* top.c (execute_command): Use inferior_thread.
* tui/tui-interp.c: Include "inferior.h".
* varobj.c (varobj_create): Use inferior_thread.
(value_of_root_1): Use find_thread_global_id instead of
global_thread_id_to_ptid.
2018-06-22 00:09:31 +08:00
|
|
|
  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();
  btrace_thread_info *btinfo = &tp->btrace;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btinfo->data.clear ();
}

/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));
(value_in_thread_stack_temporaries)
(get_last_thread_stack_temporary): Take a thread pointer instead
of a ptid_t. Adjust all callers.
(thread_info::set_running): New.
(validate_registers_access): Use inferior_thread.
(can_access_registers_ptid): Rename to ...
(can_access_registers_thread): ... this, and take a thread
pointer.
(print_thread_info_1): Adjust to compare thread pointers instead
of ptids.
(switch_to_no_thread, switch_to_thread): Make extern.
(scoped_restore_current_thread::~scoped_restore_current_thread):
Use m_thread pointer directly.
(scoped_restore_current_thread::scoped_restore_current_thread):
Use inferior_thread.
(thread_command): Use thread pointer directly.
(thread_num_make_value_helper): Use inferior_thread.
* top.c (execute_command): Use inferior_thread.
* tui/tui-interp.c: Include "inferior.h".
* varobj.c (varobj_create): Use inferior_thread.
(value_of_root_1): Use find_thread_global_id instead of
global_thread_id_to_ptid.
2018-06-22 00:09:31 +08:00
|
|
|
  thread_info *tp = inferior_thread ();
  btrace_clear (tp);
}

/* The "maintenance info btrace" command. */

static void
maint_info_btrace_cmd (const char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  btinfo = &tp->btrace;
  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
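      /* For BTS, the raw trace is kept directly in the btrace data as a
         list of branch blocks; each block counts as one "packet" here.  */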
      printf_unfiltered (_("Number of packets: %zu.\n"),
                         btinfo->data.variant.bts.blocks->size ());
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
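      /* For Intel PT, packets are decoded on demand by
         btrace_maint_update_pt_packets; also report the libipt version.  */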
      {
        struct pt_version version;

        version = pt_library_version ();
        printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
                           version.minor, version.build,
                           version.ext != NULL ? version.ext : "");

        btrace_maint_update_pt_packets (btinfo);
        printf_unfiltered (_("Number of packets: %zu.\n"),
                           ((btinfo->maint.variant.pt.packets == nullptr)
                            ? 0 : btinfo->maint.variant.pt.packets->size ()));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad  (struct ui_file *file, int from_tty,
                                struct cmd_list_element *c,
                                const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}

/* Initialize btrace maintenance commands.  */

void _initialize_btrace ();
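/* The declaration above gives the definition below a prototype;
   _initialize_* routines are called from GDB's generated initialization
   code rather than from anywhere in this file.  */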
void
_initialize_btrace ()
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
           _("Info about branch tracing data."), &maintenanceinfolist);
|
|
|
|
|
Replace most calls to help_list and cmd_show_list
Currently there are many prefix commands that do nothing but call
either help_list or cmd_show_list. I happened to notice that one such
call, for "set print type", used the wrong command list parameter,
causing incorrect output.
Rather than fix this bug in isolation, I decided to eliminate this
possibility by adding two new ways to add prefix commands, which
simply route the call to help_list or cmd_show_list, as appropriate.
This makes it impossible for a mismatch to occur.
In some cases, a bit of output was removed; however, I don't think
this output in general was very useful. It seemed redundant with
what's already printed by help_list. A representative example is this
hunk, removed from ada-lang.c:
- printf_unfiltered (_(\
-"\"set ada\" must be followed by the name of a setting.\n"));
This simplified the CLI style set/show commands quite a bit, and
allowed the deletion of a macro.
This also cleans up some unusual code in windows-tdep.c.
Tested on x86-64 Fedora 30. Note that I have no way to build the
go32-nat.c change.
gdb/ChangeLog
2020-04-17 Tom Tromey <tromey@adacore.com>
* auto-load.c (show_auto_load_cmd): Remove.
(auto_load_show_cmdlist_get): Use add_show_prefix_cmd.
* arc-tdep.c (_initialize_arc_tdep): Use add_show_prefix_cmd.
(maintenance_print_arc_command): Remove.
* tui/tui-win.c (tui_command): Remove.
(tui_get_cmd_list): Use add_basic_prefix_cmd.
* tui/tui-layout.c (tui_layout_command): Remove.
(_initialize_tui_layout): Use add_basic_prefix_cmd.
* python/python.c (user_set_python, user_show_python): Remove.
(_initialize_python): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* guile/guile.c (set_guile_command, show_guile_command): Remove.
(install_gdb_commands): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
(info_guile_command): Remove.
* dwarf2/read.c (set_dwarf_cmd, show_dwarf_cmd): Remove.
(_initialize_dwarf2_read): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* cli/cli-style.h (class cli_style_option) <add_setshow_commands>:
Remove do_set and do_show parameters.
* cli/cli-style.c (set_style, show_style): Remove.
(_initialize_cli_style): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
(cli_style_option::add_setshow_commands): Remove do_set and
do_show parameters.
(cli_style_option::add_setshow_commands): Use
add_basic_prefix_cmd, add_show_prefix_cmd.
(STYLE_ADD_SETSHOW_COMMANDS): Remove macro.
(set_style_name): Remove.
* cli/cli-dump.c (dump_command, append_command): Remove.
(srec_dump_command, ihex_dump_command, verilog_dump_command)
(tekhex_dump_command, binary_dump_command)
(binary_append_command): Remove.
(_initialize_cli_dump): Use add_basic_prefix_cmd.
* windows-tdep.c (w32_prefix_command_valid): Remove global.
(init_w32_command_list): Remove; move into ...
(_initialize_windows_tdep): ... here. Use add_basic_prefix_cmd.
* valprint.c (set_print, show_print, set_print_raw)
(show_print_raw): Remove.
(_initialize_valprint): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* typeprint.c (set_print_type, show_print_type): Remove.
(_initialize_typeprint): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* record.c (set_record_command, show_record_command): Remove.
(_initialize_record): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* cli/cli-cmds.c (_initialize_cli_cmds): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
(info_command, show_command, set_debug, show_debug): Remove.
* top.h (set_history, show_history): Don't declare.
* top.c (set_history, show_history): Remove.
* target-descriptions.c (set_tdesc_cmd, show_tdesc_cmd)
(unset_tdesc_cmd): Remove.
(_initialize_target_descriptions): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* symtab.c (info_module_command): Remove.
(_initialize_symtab): Use add_basic_prefix_cmd.
* symfile.c (overlay_command): Remove.
(_initialize_symfile): Use add_basic_prefix_cmd.
* sparc64-tdep.c (info_adi_command): Remove.
(_initialize_sparc64_adi_tdep): Use add_basic_prefix_cmd.
* sh-tdep.c (show_sh_command, set_sh_command): Remove.
(_initialize_sh_tdep): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* serial.c (serial_set_cmd, serial_show_cmd): Remove.
(_initialize_serial): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* ser-tcp.c (set_tcp_cmd, show_tcp_cmd): Remove.
(_initialize_ser_tcp): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* rs6000-tdep.c (set_powerpc_command, show_powerpc_command)
(_initialize_rs6000_tdep): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* riscv-tdep.c (show_riscv_command, set_riscv_command)
(show_debug_riscv_command, set_debug_riscv_command): Remove.
(_initialize_riscv_tdep): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* remote.c (remote_command, set_remote_cmd): Remove.
(_initialize_remote): Use add_basic_prefix_cmd.
* record-full.c (set_record_full_command)
(show_record_full_command): Remove.
(_initialize_record_full): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* record-btrace.c (cmd_set_record_btrace)
(cmd_show_record_btrace, cmd_set_record_btrace_bts)
(cmd_show_record_btrace_bts, cmd_set_record_btrace_pt)
(cmd_show_record_btrace_pt): Remove.
(_initialize_record_btrace): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* ravenscar-thread.c (set_ravenscar_command)
(show_ravenscar_command): Remove.
(_initialize_ravenscar): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* mips-tdep.c (show_mips_command, set_mips_command)
(_initialize_mips_tdep): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* maint.c (maintenance_command, maintenance_info_command)
(maintenance_check_command, maintenance_print_command)
(maintenance_set_cmd, maintenance_show_cmd): Remove.
(_initialize_maint_cmds): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
(show_per_command_cmd): Remove.
* maint-test-settings.c (maintenance_set_test_settings_cmd):
Remove.
(maintenance_show_test_settings_cmd): Remove.
(_initialize_maint_test_settings): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* maint-test-options.c (maintenance_test_options_command):
Remove.
(_initialize_maint_test_options): Use add_basic_prefix_cmd.
	* macrocmd.c (macro_command): Remove.
(_initialize_macrocmd): Use add_basic_prefix_cmd.
* language.c (set_check, show_check): Remove.
(_initialize_language): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* infcmd.c (unset_command): Remove.
(_initialize_infcmd): Use add_basic_prefix_cmd.
* i386-tdep.c (set_mpx_cmd, show_mpx_cmd): Remove.
(_initialize_i386_tdep): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* go32-nat.c (go32_info_dos_command): Remove.
(_initialize_go32_nat): Use add_basic_prefix_cmd.
* cli/cli-decode.c (do_prefix_cmd, add_basic_prefix_cmd)
(do_show_prefix_cmd, add_show_prefix_cmd): New functions.
* frame.c (set_backtrace_cmd, show_backtrace_cmd): Remove.
(_initialize_frame): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* dcache.c (set_dcache_command, show_dcache_command): Remove.
(_initialize_dcache): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* cp-support.c (maint_cplus_command): Remove.
(_initialize_cp_support): Use add_basic_prefix_cmd.
* btrace.c (maint_btrace_cmd, maint_btrace_set_cmd)
(maint_btrace_show_cmd, maint_btrace_pt_set_cmd)
(maint_btrace_pt_show_cmd, _initialize_btrace): Use
add_basic_prefix_cmd, add_show_prefix_cmd.
* breakpoint.c (save_command): Remove.
(_initialize_breakpoint): Use add_basic_prefix_cmd.
* arm-tdep.c (set_arm_command, show_arm_command): Remove.
(_initialize_arm_tdep): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* ada-lang.c (maint_set_ada_cmd, maint_show_ada_cmd)
(set_ada_command, show_ada_command): Remove.
(_initialize_ada_language): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* command.h (add_basic_prefix_cmd, add_show_prefix_cmd): Declare.
gdb/testsuite/ChangeLog
2020-04-17 Tom Tromey <tromey@adacore.com>
* gdb.cp/maint.exp (test_help): Simplify multiple_help_body.
Update tests.
* gdb.btrace/cpu.exp: Update tests.
* gdb.base/maint.exp: Update tests.
* gdb.base/default.exp: Update tests.
* gdb.base/completion.exp: Update tests.
2020-04-17 21:27:14 +08:00
|
|
|
add_basic_prefix_cmd ("btrace", class_maintenance,
|
|
|
|
_("Branch tracing maintenance commands."),
|
|
|
|
&maint_btrace_cmdlist, "maintenance btrace ",
|
|
|
|
0, &maintenancelist);
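/* "maintenance btrace" itself hangs off the top-level maintenance
   command list; the set/show variants registered below each get their
   own sublist.  */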
|
btrace: maintenance commands
Add maintenance commands that help debugging the btrace record target.
The following new commands are added:
maint info btrace
  Print information about branch tracing internals.
maint btrace packet-history
  Print the raw branch tracing data.
maint btrace clear-packet-history
  Discard the stored raw branch tracing data.
maint btrace clear
  Discard all branch tracing data. It will be fetched and processed
  anew by the next "record" command.
maint set|show btrace pt skip-pad
  Set and show whether PAD packets are skipped when computing the
  packet history.
gdb/
* btrace.c: Include gdbcmd.h, cli/cli-utils.h, and ctype.h.
(maint_btrace_cmdlist, maint_btrace_set_cmdlist)
(maint_btrace_show_cmdlist, maint_btrace_pt_set_cmdlist)
(maint_btrace_pt_show_cmdlist, maint_btrace_pt_skip_pad)
(btrace_maint_clear): New.
(btrace_fetch, btrace_clear): Call btrace_maint_clear.
(pt_print_packet, btrace_maint_decode_pt)
(btrace_maint_update_pt_packets, btrace_maint_update_packets)
(btrace_maint_print_packets, get_uint, get_context_size, no_chunk)
(maint_btrace_packet_history_cmd)
(maint_btrace_clear_packet_history_cmd, maint_btrace_clear_cmd)
(maint_btrace_cmd, maint_btrace_set_cmd, maint_btrace_show_cmd)
(maint_btrace_pt_set_cmd, maint_btrace_pt_show_cmd)
(maint_info_btrace_cmd, _initialize_btrace): New.
* btrace.h (btrace_pt_packet, btrace_pt_packet_s)
(btrace_maint_packet_history, btrace_maint_info): New.
(btrace_thread_info) <maint>: New.
* NEWS: Announce it.
doc/
* gdb.texinfo (Maintenance Commands): Document "maint btrace"
commands.
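As a rough illustration of the skip-pad setting described above (a sketch
only, not GDB's implementation: the packet_kind enum, the skip_pad flag,
and print_packet_history are hypothetical stand-ins, while the real code
walks libipt packets), computing the packet history with skip-pad enabled
amounts to filtering out PAD packets:

    #include <stdio.h>

    /* Hypothetical packet kinds; GDB's real code uses libipt packet
       types, where PAD packets are pure padding.  */
    enum packet_kind { PACKET_PAD, PACKET_TIP, PACKET_TNT };

    struct packet
    {
      enum packet_kind kind;
      unsigned long offset;
    };

    /* Print a packet history, skipping PAD packets when SKIP_PAD is set,
       mirroring "maint set btrace pt skip-pad on".  */
    static void
    print_packet_history (const struct packet *packets, int count,
                          int skip_pad)
    {
      for (int i = 0; i < count; i++)
        {
          if (skip_pad && packets[i].kind == PACKET_PAD)
            continue;

          printf ("%d\t0x%lx\tkind=%d\n", i, packets[i].offset,
                  (int) packets[i].kind);
        }
    }

    int
    main (void)
    {
      const struct packet packets[] =
        {
          { PACKET_TIP, 0x10 }, { PACKET_PAD, 0x18 }, { PACKET_TNT, 0x20 }
        };

      print_packet_history (packets, 3, /* skip_pad = */ 1);
      return 0;
    }

With skip_pad set to 0, the PAD entry at offset 0x18 would be listed as
well.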
2014-02-03 21:35:28 +08:00
|
|
|
|
2020-04-17 21:27:14 +08:00
|
|
|
add_basic_prefix_cmd ("btrace", class_maintenance, _("\
|
2014-02-03 21:35:28 +08:00
|
|
|
Set branch tracing specific variables."),
|
2020-04-17 21:27:14 +08:00
|
|
|
&maint_btrace_set_cmdlist, "maintenance set btrace ",
|
|
|
|
0, &maintenance_set_cmdlist);
|
2014-02-03 21:35:28 +08:00
|
|
|
|
2020-04-17 21:27:14 +08:00
|
|
|
add_basic_prefix_cmd ("pt", class_maintenance, _("\
|
2016-01-12 23:03:11 +08:00
|
|
|
Set Intel Processor Trace specific variables."),
|
2020-04-17 21:27:14 +08:00
|
|
|
&maint_btrace_pt_set_cmdlist,
|
|
|
|
"maintenance set btrace pt ",
|
|
|
|
0, &maint_btrace_set_cmdlist);
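/* The "pt" prefix above chains onto maint_btrace_set_cmdlist, so Intel
   Processor Trace settings (such as the skip-pad flag described in the
   commit message earlier) end up under "maintenance set btrace pt ...".  */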
|
2014-02-03 21:35:28 +08:00
|
|
|
|
2020-04-17 21:27:14 +08:00
|
|
|
add_show_prefix_cmd ("btrace", class_maintenance, _("\
|
2014-02-03 21:35:28 +08:00
|
|
|
Show branch tracing specific variables."),
|
Replace most calls to help_list and cmd_show_list
Currently there are many prefix commands that do nothing but call
either help_list or cmd_show_list. I happened to notice that one such
call, for "set print type", used the wrong command list parameter,
causing incorrect output.
Rather than fix this bug in isolation, I decided to eliminate this
possibility by adding two new ways to add prefix commands, which
simply route the call to help_list or cmd_show_list, as appropriate.
This makes it impossible for a mismatch to occur.
In some cases, a bit of output was removed; however, I don't think
this output in general was very useful. It seemed redundant with
what's already printed by help_list. A representative example is this
hunk, removed from ada-lang.c:
- printf_unfiltered (_(\
-"\"set ada\" must be followed by the name of a setting.\n"));
This simplified the CLI style set/show commands quite a bit, and
allowed the deletion of a macro.
This also cleans up some unusual code in windows-tdep.c.
Tested on x86-64 Fedora 30. Note that I have no way to build the
go32-nat.c change.
gdb/ChangeLog
2020-04-17 Tom Tromey <tromey@adacore.com>
* auto-load.c (show_auto_load_cmd): Remove.
(auto_load_show_cmdlist_get): Use add_show_prefix_cmd.
* arc-tdep.c (_initialize_arc_tdep): Use add_show_prefix_cmd.
(maintenance_print_arc_command): Remove.
* tui/tui-win.c (tui_command): Remove.
(tui_get_cmd_list): Use add_basic_prefix_cmd.
* tui/tui-layout.c (tui_layout_command): Remove.
(_initialize_tui_layout): Use add_basic_prefix_cmd.
* python/python.c (user_set_python, user_show_python): Remove.
(_initialize_python): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* guile/guile.c (set_guile_command, show_guile_command): Remove.
(install_gdb_commands): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
(info_guile_command): Remove.
* dwarf2/read.c (set_dwarf_cmd, show_dwarf_cmd): Remove.
(_initialize_dwarf2_read): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* cli/cli-style.h (class cli_style_option) <add_setshow_commands>:
Remove do_set and do_show parameters.
* cli/cli-style.c (set_style, show_style): Remove.
(_initialize_cli_style): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
(cli_style_option::add_setshow_commands): Remove do_set and
do_show parameters.
(cli_style_option::add_setshow_commands): Use
add_basic_prefix_cmd, add_show_prefix_cmd.
(STYLE_ADD_SETSHOW_COMMANDS): Remove macro.
(set_style_name): Remove.
* cli/cli-dump.c (dump_command, append_command): Remove.
(srec_dump_command, ihex_dump_command, verilog_dump_command)
(tekhex_dump_command, binary_dump_command)
(binary_append_command): Remove.
(_initialize_cli_dump): Use add_basic_prefix_cmd.
* windows-tdep.c (w32_prefix_command_valid): Remove global.
(init_w32_command_list): Remove; move into ...
(_initialize_windows_tdep): ... here. Use add_basic_prefix_cmd.
* valprint.c (set_print, show_print, set_print_raw)
(show_print_raw): Remove.
(_initialize_valprint): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* typeprint.c (set_print_type, show_print_type): Remove.
(_initialize_typeprint): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* record.c (set_record_command, show_record_command): Remove.
(_initialize_record): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* cli/cli-cmds.c (_initialize_cli_cmds): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
(info_command, show_command, set_debug, show_debug): Remove.
* top.h (set_history, show_history): Don't declare.
* top.c (set_history, show_history): Remove.
* target-descriptions.c (set_tdesc_cmd, show_tdesc_cmd)
(unset_tdesc_cmd): Remove.
(_initialize_target_descriptions): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* symtab.c (info_module_command): Remove.
(_initialize_symtab): Use add_basic_prefix_cmd.
* symfile.c (overlay_command): Remove.
(_initialize_symfile): Use add_basic_prefix_cmd.
* sparc64-tdep.c (info_adi_command): Remove.
(_initialize_sparc64_adi_tdep): Use add_basic_prefix_cmd.
* sh-tdep.c (show_sh_command, set_sh_command): Remove.
(_initialize_sh_tdep): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* serial.c (serial_set_cmd, serial_show_cmd): Remove.
(_initialize_serial): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* ser-tcp.c (set_tcp_cmd, show_tcp_cmd): Remove.
(_initialize_ser_tcp): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* rs6000-tdep.c (set_powerpc_command, show_powerpc_command)
(_initialize_rs6000_tdep): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* riscv-tdep.c (show_riscv_command, set_riscv_command)
(show_debug_riscv_command, set_debug_riscv_command): Remove.
(_initialize_riscv_tdep): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* remote.c (remote_command, set_remote_cmd): Remove.
(_initialize_remote): Use add_basic_prefix_cmd.
* record-full.c (set_record_full_command)
(show_record_full_command): Remove.
(_initialize_record_full): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* record-btrace.c (cmd_set_record_btrace)
(cmd_show_record_btrace, cmd_set_record_btrace_bts)
(cmd_show_record_btrace_bts, cmd_set_record_btrace_pt)
(cmd_show_record_btrace_pt): Remove.
(_initialize_record_btrace): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* ravenscar-thread.c (set_ravenscar_command)
(show_ravenscar_command): Remove.
(_initialize_ravenscar): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* mips-tdep.c (show_mips_command, set_mips_command)
(_initialize_mips_tdep): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* maint.c (maintenance_command, maintenance_info_command)
(maintenance_check_command, maintenance_print_command)
(maintenance_set_cmd, maintenance_show_cmd): Remove.
(_initialize_maint_cmds): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
(show_per_command_cmd): Remove.
* maint-test-settings.c (maintenance_set_test_settings_cmd):
Remove.
(maintenance_show_test_settings_cmd): Remove.
(_initialize_maint_test_settings): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* maint-test-options.c (maintenance_test_options_command):
Remove.
(_initialize_maint_test_options): Use add_basic_prefix_cmd.
* macrocmd.c (macro_command): Remove.
(_initialize_macrocmd): Use add_basic_prefix_cmd.
* language.c (set_check, show_check): Remove.
(_initialize_language): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* infcmd.c (unset_command): Remove.
(_initialize_infcmd): Use add_basic_prefix_cmd.
* i386-tdep.c (set_mpx_cmd, show_mpx_cmd): Remove.
(_initialize_i386_tdep): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* go32-nat.c (go32_info_dos_command): Remove.
(_initialize_go32_nat): Use add_basic_prefix_cmd.
* cli/cli-decode.c (do_prefix_cmd, add_basic_prefix_cmd)
(do_show_prefix_cmd, add_show_prefix_cmd): New functions.
* frame.c (set_backtrace_cmd, show_backtrace_cmd): Remove.
(_initialize_frame): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* dcache.c (set_dcache_command, show_dcache_command): Remove.
(_initialize_dcache): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* cp-support.c (maint_cplus_command): Remove.
(_initialize_cp_support): Use add_basic_prefix_cmd.
* btrace.c (maint_btrace_cmd, maint_btrace_set_cmd)
(maint_btrace_show_cmd, maint_btrace_pt_set_cmd)
(maint_btrace_pt_show_cmd, _initialize_btrace): Use
add_basic_prefix_cmd, add_show_prefix_cmd.
* breakpoint.c (save_command): Remove.
(_initialize_breakpoint): Use add_basic_prefix_cmd.
* arm-tdep.c (set_arm_command, show_arm_command): Remove.
(_initialize_arm_tdep): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* ada-lang.c (maint_set_ada_cmd, maint_show_ada_cmd)
(set_ada_command, show_ada_command): Remove.
(_initialize_ada_language): Use add_basic_prefix_cmd,
add_show_prefix_cmd.
* command.h (add_basic_prefix_cmd, add_show_prefix_cmd): Declare.
gdb/testsuite/ChangeLog
2020-04-17 Tom Tromey <tromey@adacore.com>
* gdb.cp/maint.exp (test_help): Simplify multiple_help_body.
Update tests.
* gdb.btrace/cpu.exp: Update tests.
* gdb.base/maint.exp: Update tests.
* gdb.base/default.exp: Update tests.
* gdb.base/completion.exp: Update tests.
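The entries above all apply one mechanical pattern: delete a prefix handler whose only job was to call help_list or cmd_show_list, and register the prefix through the two new helpers instead. The sketch below is illustrative only and not part of the patch; the "foo" prefix, its command lists, and _initialize_foo are hypothetical, while the argument order mirrors the add_show_prefix_cmd call for "maintenance show btrace pt" that appears later in this file.

#include "defs.h"
#include "gdbcmd.h"

/* Hypothetical command lists for a "set foo"/"show foo" prefix pair.  */
static struct cmd_list_element *set_foo_cmdlist;
static struct cmd_list_element *show_foo_cmdlist;

void
_initialize_foo (void)
{
  /* "set foo" no longer needs its own do-nothing handler; the helper
     routes an argument-less invocation to help_list.  */
  add_basic_prefix_cmd ("foo", class_maintenance, _("\
Set foo specific variables."),
			&set_foo_cmdlist, "set foo ",
			0 /* allow_unknown */, &setlist);

  /* Likewise, "show foo" is routed to cmd_show_list.  */
  add_show_prefix_cmd ("foo", class_maintenance, _("\
Show foo specific variables."),
		       &show_foo_cmdlist, "show foo ",
		       0 /* allow_unknown */, &showlist);
}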
2020-04-17 21:27:14 +08:00
|
|
|
&maint_btrace_show_cmdlist, "maintenance show btrace ",
|
|
|
|
0, &maintenance_show_cmdlist);
|
btrace: maintenance commands
Add maintenance commands that help with debugging the btrace record target.
The following new commands are added:
maint info btrace
  Print information about branch tracing internals.
maint btrace packet-history
  Print the raw branch tracing data.
maint btrace clear-packet-history
  Discard the stored raw branch tracing data.
maint btrace clear
  Discard all branch tracing data.  It will be fetched and processed
  anew by the next "record" command.
maint set|show btrace pt skip-pad
  Set and show whether PAD packets are skipped when computing the
  packet history.
gdb/
* btrace.c: Include gdbcmd.h, cli/cli-utils.h, and ctype.h.
(maint_btrace_cmdlist, maint_btrace_set_cmdlist)
(maint_btrace_show_cmdlist, maint_btrace_pt_set_cmdlist)
(maint_btrace_pt_show_cmdlist, maint_btrace_pt_skip_pad)
(btrace_maint_clear): New.
(btrace_fetch, btrace_clear): Call btrace_maint_clear.
(pt_print_packet, btrace_maint_decode_pt)
(btrace_maint_update_pt_packets, btrace_maint_update_packets)
(btrace_maint_print_packets, get_uint, get_context_size, no_chunk)
(maint_btrace_packet_history_cmd)
(maint_btrace_clear_packet_history_cmd, maint_btrace_clear_cmd)
(maint_btrace_cmd, maint_btrace_set_cmd, maint_btrace_show_cmd)
(maint_btrace_pt_set_cmd, maint_btrace_pt_show_cmd)
(maint_info_btrace_cmd, _initialize_btrace): New.
* btrace.h (btrace_pt_packet, btrace_pt_packet_s)
(btrace_maint_packet_history, btrace_maint_info): New.
(btrace_thread_info) <maint>: New.
* NEWS: Announce it.
doc/
* gdb.texinfo (Maintenance Commands): Document "maint btrace"
commands.
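For orientation, here is a rough sketch (not the patch itself) of how "maint info btrace" plugs into GDB's command tables: the command-list name is taken from the ChangeLog entry above, maintenanceinfolist is GDB's existing "maintenance info" list, the doc string is the description quoted above, and the callback body is elided.  The usual btrace.c includes (gdbcmd.h and friends) are assumed.

/* One of the command lists named in the ChangeLog entry above; the
   set/show and pt variants are declared the same way.  */
static struct cmd_list_element *maint_btrace_cmdlist;

/* Implements "maint info btrace".  The real body prints the branch
   tracing internals of the current thread; it is elided here.  */

static void
maint_info_btrace_cmd (const char *args, int from_tty)
{
}

void
_initialize_btrace (void)
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
	   _("Print information about branch tracing internals."),
	   &maintenanceinfolist);

  /* The "maint btrace" prefix and its subcommands hang off
     maint_btrace_cmdlist; see the registrations later in this file.  */
}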
2014-02-03 21:35:28 +08:00
|
|
|
|
2020-04-17 21:27:14 +08:00
|
|
|
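/* Add "maint show btrace pt", the show prefix for the Intel
   Processor Trace settings registered below.  */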
add_show_prefix_cmd ("pt", class_maintenance, _("\
|
2016-01-12 23:03:11 +08:00
|
|
|
Show Intel Processor Trace specific variables."),
|
2020-04-17 21:27:14 +08:00
|
|
|
&maint_btrace_pt_show_cmdlist,
|
|
|
|
"maintenance show btrace pt ",
|
|
|
|
0, &maint_btrace_show_cmdlist);
|
2014-02-03 21:35:28 +08:00
|
|
|
|
|
|
|
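/* "maint set|show btrace pt skip-pad": whether PAD packets are
   skipped when building the packet history.  */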
add_setshow_boolean_cmd ("skip-pad", class_maintenance,
|
|
|
|
&maint_btrace_pt_skip_pad, _("\
|
|
|
|
Set whether PAD packets should be skipped in the btrace packet history."), _("\
|
|
|
|
Show whether PAD packets should be skipped in the btrace packet history."),_("\
|
|
|
|
When enabled, PAD packets are ignored in the btrace packet history."),
|
|
|
|
NULL, show_maint_btrace_pt_skip_pad,
|
|
|
|
&maint_btrace_pt_set_cmdlist,
|
|
|
|
&maint_btrace_pt_show_cmdlist);
|
|
|
|
|
|
|
|
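/* "maint btrace packet-history": print the raw branch tracing packets.  */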
add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
|
|
|
|
_("Print the raw branch tracing data.\n\
|
|
|
|
With no argument, print ten more packets after the previous ten-line print.\n\
|
|
|
|
With '-' as argument print ten packets before a previous ten-line print.\n\
|
|
|
|
One argument specifies the starting packet of a ten-line print.\n\
|
|
|
|
Two arguments with comma between specify starting and ending packets to \
|
|
|
|
print.\n\
|
|
|
|
Preceded with '+'/'-' the second argument specifies the distance from the \
|
2019-06-04 20:17:09 +08:00
|
|
|
first."),
|
2014-02-03 21:35:28 +08:00
|
|
|
&maint_btrace_cmdlist);
|
|
|
|
|
|
|
|
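/* "maint btrace clear-packet-history": discard the stored raw
   branch tracing data.  */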
add_cmd ("clear-packet-history", class_maintenance,
|
|
|
|
maint_btrace_clear_packet_history_cmd,
|
|
|
|
_("Clears the branch tracing packet history.\n\
|
2019-06-04 20:17:09 +08:00
|
|
|
Discards the raw branch tracing data but not the execution history data."),
|
2014-02-03 21:35:28 +08:00
|
|
|
&maint_btrace_cmdlist);
|
|
|
|
|
|
|
|
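/* "maint btrace clear": discard all branch tracing data; it will be
   fetched anew by the next "record" command.  */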
add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
|
|
|
|
_("Clears the branch tracing data.\n\
|
|
|
|
Discards the raw branch tracing data and the execution history data.\n\
|
2019-06-04 20:17:09 +08:00
|
|
|
The next 'record' command will fetch the branch tracing data anew."),
|
2014-02-03 21:35:28 +08:00
|
|
|
&maint_btrace_cmdlist);
|
|
|
|
|
|
|
|
}
|