binutils-gdb/gdb/btrace.c

/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"
#include <inttypes.h>
#include <ctype.h>
#include <algorithm>
/* Command lists for btrace maintenance commands. */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;
static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */
#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[btrace] " msg "\n", ##args); \
    } \
  while (0)
#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
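
/* Illustrative note, not part of the original source: the do ... while (0)
   wrapper above makes DEBUG expand to a single statement, so it can sit in
   an unbraced if/else without detaching the else from its if.  A minimal
   sketch (the condition is a placeholder):

     if (some_condition)
       DEBUG ("fetching trace");
     else
       DEBUG ("nothing to fetch");

   Without the wrapper, the macro's braces followed by the caller's ';'
   would end the if statement early and orphan the else branch.  */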

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */
static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */
static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}
/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */
static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}
/* Print an ftrace debug status message. */
static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
const char *fun, *file;
unsigned int ibegin, iend;
int level;
fun = ftrace_print_function_name (bfun);
file = ftrace_print_filename (bfun);
level = bfun->level;
ibegin = bfun->insn_offset;
iend = ibegin + bfun->insn.size ();
DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
prefix, fun, file, level, ibegin, iend);
}
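/* For illustration (hypothetical values): a segment with insn_offset == 7,
   three instructions and level == -1 is reported with insn = [7; 10).
   The range is half-open; for an ordinary (non-gap) segment the upper
   bound equals the insn_offset of the segment that follows.  */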
/* Return the number of instructions in a given function call segment. */
static unsigned int
ftrace_call_num_insn (const struct btrace_function *bfun)
{
if (bfun == NULL)
return 0;
/* A gap is always counted as one instruction. */
if (bfun->errcode != 0)
return 1;
return bfun->insn.size ();
}
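/* For illustration: a call segment holding five decoded instructions
   yields 5, while a gap segment (errcode != 0) always yields 1, so a gap
   still occupies exactly one slot in the overall instruction numbering.  */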
/* Return the function segment with the given NUMBER or NULL if no such segment
exists. BTINFO is the branch trace information for the current thread. */
static struct btrace_function *
ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
unsigned int number)
{
if (number == 0 || number > btinfo->functions.size ())
return NULL;
return &btinfo->functions[number - 1];
}
/* A const version of the function above. */
static const struct btrace_function *
ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
unsigned int number)
{
if (number == 0 || number > btinfo->functions.size ())
return NULL;
return &btinfo->functions[number - 1];
}
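/* For illustration (hypothetical trace): with three entries in
   BTINFO->functions, both overloads map NUMBER 1, 2, 3 to vector indices
   0, 1, 2; NUMBER 0 and NUMBER 4 lie outside the trace and yield NULL.  */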
/* Return non-zero if BFUN does not match MFUN and FUN;
   return zero otherwise.  */
static int
ftrace_function_switched (const struct btrace_function *bfun,
const struct minimal_symbol *mfun,
const struct symbol *fun)
{
struct minimal_symbol *msym;
struct symbol *sym;
msym = bfun->msym;
sym = bfun->sym;
/* If the minimal symbol changed, we certainly switched functions. */
if (mfun != NULL && msym != NULL
&& strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
return 1;
/* If the symbol changed, we certainly switched functions. */
if (fun != NULL && sym != NULL)
{
const char *bfname, *fname;
/* Check the function name. */
if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
return 1;
/* Check the location of those functions, as well. */
bfname = symtab_to_fullname (symbol_symtab (sym));
fname = symtab_to_fullname (symbol_symtab (fun));
if (filename_cmp (fname, bfname) != 0)
return 1;
}
/* If we lost symbol information, we switched functions. */
if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
return 1;
/* If we gained symbol information, we switched functions. */
if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
return 1;
return 0;
}
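/* For illustration, the checks above report a switch (non-zero) for:
   - a different minimal symbol linkage name,
   - the same symbol name defined in a different source file, e.g. two
     static functions with the same name in different translation units
     (a hypothetical example),
   - losing symbol information: MFUN and FUN are both NULL while the
     current segment still has some symbol information,
   - gaining symbol information: the current segment has none, but MFUN
     or FUN is non-NULL.
   Otherwise zero is returned.  */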
/* Allocate and initialize a new branch trace function segment at the end of
the trace.
BTINFO is the branch trace information for the current thread.
MFUN and FUN are the symbol information we have for this function.
   This invalidates all struct btrace_function pointers currently held.  */
static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
int level;
unsigned int number, insn_offset;
if (btinfo->functions.empty ())
{
/* Start counting NUMBER and INSN_OFFSET at one. */
level = 0;
number = 1;
insn_offset = 1;
}
else
{
const struct btrace_function *prev = &btinfo->functions.back ();
level = prev->level;
number = prev->number + 1;
insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
}
btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
return &btinfo->functions.back ();
}
/* Update the UP field of a function segment. */
static void
ftrace_update_caller (struct btrace_function *bfun,
struct btrace_function *caller,
enum btrace_function_flag flags)
{
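/* A non-zero UP means a caller had already been set; note that it is being replaced.  */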
if (bfun->up != 0)
ftrace_debug (bfun, "updating caller");
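/* Function segments reference one another by number, so store the caller's segment number rather than a pointer.  */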
bfun->up = caller->number;
bfun->flags = flags;
ftrace_debug (bfun, "set caller");
ftrace_debug (caller, "..to");
}
/* Fix up the caller for all segments of a function. */
static void
ftrace_fixup_caller (struct btrace_thread_info *btinfo,
struct btrace_function *bfun,
struct btrace_function *caller,
enum btrace_function_flag flags)
{
unsigned int prev, next;
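/* Save the links to the preceding and following segments of the same function; BFUN is reused as the iteration variable in the loops below.  */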
prev = bfun->prev;
next = bfun->next;
ftrace_update_caller (bfun, caller, flags);
/* Update all function segments belonging to the same function. */
for (; prev != 0; prev = bfun->prev)
{
bfun = ftrace_find_call_by_number (btinfo, prev);
ftrace_update_caller (bfun, caller, flags);
}
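/* Likewise for the function segments that follow BFUN.  */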
for (; next != 0; next = bfun->next)
{
bfun = ftrace_find_call_by_number (btinfo, next);
ftrace_update_caller (bfun, caller, flags);
}
}
/* Add a new function segment for a call at the end of the trace.
BTINFO is the branch trace information for the current thread.
MFUN and FUN are the symbol information we have for this function. */
static struct btrace_function *
ftrace_new_call (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
const unsigned int length = btinfo->functions.size ();
struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
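  /* Function segment numbers start at one, so the number of the calling
     segment equals the size of the vector before the new segment was
     appended.  */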
bfun->up = length;
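  /* The callee runs one level deeper than the segment that called it.  */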
bfun->level += 1;
ftrace_debug (bfun, "new call");
return bfun;
}
/* Add a new function segment for a tail call at the end of the trace.
BTINFO is the branch trace information for the current thread.
MFUN and FUN are the symbol information we have for this function. */
static struct btrace_function *
ftrace_new_tailcall (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
const unsigned int length = btinfo->functions.size ();
struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
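  /* A tail call is linked up and leveled like a regular call.  */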
bfun->up = length;
bfun->level += 1;
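  /* Record that this up link stems from a tail call so that functions
     walking the caller chain (see ftrace_get_caller below) can skip it.  */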
bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
ftrace_debug (bfun, "new tail call");
return bfun;
}
/* Return the caller of BFUN or NULL if there is none. This function skips
tail calls in the call chain. BTINFO is the branch trace information for
the current thread. */
static struct btrace_function *
ftrace_get_caller (struct btrace_thread_info *btinfo,
struct btrace_function *bfun)
{
for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
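    /* Skip over up links that stem from tail calls; the first regular up
       link identifies the caller.  */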
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return ftrace_find_call_by_number (btinfo, bfun->up);

  return NULL;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  BTINFO is the branch trace information for the current
   thread.  */
static struct btrace_function *
ftrace_find_caller (struct btrace_thread_info *btinfo,
                    struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}
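
/* Illustration only (hypothetical snippet, not part of GDB): given a function
   segment PREV and the symbol information MFUN/FUN of the function we are
   returning into, the matching caller segment could be looked up as

     caller = ftrace_find_caller (btinfo,
                                  ftrace_find_call_by_number (btinfo,
                                                              prev->up),
                                  mfun, fun);

   Starting the search at PREV's caller rather than at PREV itself avoids
   matching PREV when the function is recursive.  */
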
/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  BTINFO is the branch trace information for
   the current thread.  */
static struct btrace_function *
ftrace_find_call (struct btrace_thread_info *btinfo,
                  struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

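      /* Stop at the first segment that ends with a call instruction.  */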
      btrace_insn &last = bfun->insn.back ();
      if (last.iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}
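
/* Usage sketch (illustration only, not part of GDB): the two lookups above
   are typically combined when reconstructing the call stack; prefer a caller
   with matching symbol information and otherwise settle for any segment that
   ends with a call instruction.

     caller = ftrace_find_caller (btinfo, bfun, mfun, fun);
     if (caller == NULL)
       caller = ftrace_find_call (btinfo, bfun);

   ftrace_new_return below performs a similar two-step lookup, starting from
   the caller of the preceding function segment.  */
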
/* Add a continuation segment for a function into which we return at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */
static struct btrace_function *
ftrace_new_return (struct btrace_thread_info *btinfo,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *prev, *bfun, *caller;

bfun = ftrace_new_function (btinfo, mfun, fun);
prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
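  /* PREV is the preceding function segment, i.e. the function we are
     returning from; BFUN is the new segment for the function we return to.  */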
/* It is important to start at PREV's caller. Otherwise, we might find
PREV itself, if PREV is a recursive function. */
caller = ftrace_find_call_by_number (btinfo, prev->up);
caller = ftrace_find_caller (btinfo, caller, mfun, fun);
if (caller != NULL)
{
/* The caller of PREV is the preceding btrace function segment in this
function instance. */
gdb_assert (caller->next == 0);
caller->next = bfun->number;
bfun->prev = caller->number;
/* Maintain the function level. */
bfun->level = caller->level;
/* Maintain the call stack. */
bfun->up = caller->up;
bfun->flags = caller->flags;
ftrace_debug (bfun, "new return");
}
else
{
/* We did not find a caller. This could mean that something went
wrong or that the call is simply not included in the trace. */
/* Let's search for some actual call. */
caller = ftrace_find_call_by_number (btinfo, prev->up);
caller = ftrace_find_call (btinfo, caller);
if (caller == NULL)
{
/* There is no call in PREV's back trace. We assume that the
branch trace did not include it. */
/* Let's find the topmost function and add a new caller for it.
This should handle a series of initial tail calls. */
while (prev->up != 0)
prev = ftrace_find_call_by_number (btinfo, prev->up);
bfun->level = prev->level - 1;
/* Fix up the call stack for PREV. */
ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);
ftrace_debug (bfun, "new return - no caller");
}
else
{
/* There is a call in PREV's back trace to which we should have
returned but didn't. Let's start a new, separate back trace
from PREV's level. */
bfun->level = prev->level - 1;
/* We fix up the back trace for PREV but leave other function segments
on the same level as they are.
This should handle things like schedule () correctly where we're
switching contexts. */
prev->up = bfun->number;
prev->flags = BFUN_UP_LINKS_TO_RET;
ftrace_debug (bfun, "new return - unknown caller");
}
}
return bfun;
}
/* Add a new function segment for a function switch at the end of the trace.
BTINFO is the branch trace information for the current thread.
MFUN and FUN are the symbol information we have for this function. */
static struct btrace_function *
ftrace_new_switch (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
struct btrace_function *prev, *bfun;
  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, so the best we can do for now is to preserve it.  */
bfun = ftrace_new_function (btinfo, mfun, fun);
prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
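  /* Inherit PREV's caller link and its interpretation so the new segment
     stays at the same height in the back trace.  */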
bfun->up = prev->up;
bfun->flags = prev->flags;
ftrace_debug (bfun, "new switch");
return bfun;
}
/* Add a new function segment for a gap in the trace due to a decode error at
the end of the trace.
BTINFO is the branch trace information for the current thread.
ERRCODE is the format-specific error code. */
static struct btrace_function *
ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
std::vector<unsigned int> &gaps)
{
struct btrace_function *bfun;
if (btinfo->functions.empty ())
bfun = ftrace_new_function (btinfo, NULL, NULL);
else
{
/* We hijack the previous function segment if it was empty. */
bfun = &btinfo->functions.back ();
if (bfun->errcode != 0 || !bfun->insn.empty ())
bfun = ftrace_new_function (btinfo, NULL, NULL);
}
bfun->errcode = errcode;
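  /* A non-zero ERRCODE is what marks this segment as a gap.  Remember its
     number so the caller can post-process the gaps once the whole trace has
     been computed.  */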
gaps.push_back (bfun->number);
ftrace_debug (bfun, "new gap");
return bfun;
}
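/* A rough sketch of how the gap and function-segment helpers in this file
   are meant to fit together during trace decoding.  decode_next_block () is
   a hypothetical helper standing in for the format-specific decoders; it is
   not a function in this file:

     std::vector<unsigned int> gaps;

     while (decode_next_block (&begin, &end, &errcode))
       {
	 if (errcode != 0)
	   ftrace_new_gap (btinfo, errcode, gaps);
	 else
	   for (CORE_ADDR pc = begin; pc < end; pc += insn_size)
	     ftrace_update_function (btinfo, pc);
       }

   The actual decoders further down follow roughly this pattern: a gap
   segment per decode error and one ftrace_update_function call per decoded
   instruction.  */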
/* Update the current function segment at the end of the trace in BTINFO with
respect to the instruction at PC. This may create new function segments.
Return the chronologically latest function segment, never NULL. */
static struct btrace_function *
ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
{
struct bound_minimal_symbol bmfun;
struct minimal_symbol *mfun;
struct symbol *fun;
struct btrace_function *bfun;
/* Try to determine the function we're in. We use both types of symbols
to avoid surprises when we sometimes get a full symbol and sometimes
only a minimal symbol. */
fun = find_pc_function (pc);
bmfun = lookup_minimal_symbol_by_pc (pc);
mfun = bmfun.minsym;
if (fun == NULL && mfun == NULL)
DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
/* If we didn't have a function, we create one. */
if (btinfo->functions.empty ())
return ftrace_new_function (btinfo, mfun, fun);
/* If we had a gap before, we create a function. */
bfun = &btinfo->functions.back ();
if (bfun->errcode != 0)
2017-05-30 18:47:37 +08:00
return ftrace_new_function (btinfo, mfun, fun);
  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  btrace_insn *last = NULL;
  if (!bfun->insn.empty ())
    last = &bfun->insn.back ();

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tailcall.
               If we treated it as a return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame ids.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (btinfo, mfun, fun);

            return ftrace_new_return (btinfo, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

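          /* A real call starts a new function segment for the callee and
             records the caller as its call-stack (up) link.  */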
          return ftrace_new_call (btinfo, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);
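
            /* get_pc_function_start returns zero if it cannot determine the
               function containing PC; the checks below rely on this.  */
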
            /* A jump to the start of a function is (typically) a tail call.  */
            if (start == pc)
              return ftrace_new_tailcall (btinfo, mfun, fun);

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as a tail call if we're switching functions
               and as an intra-function branch if we aren't.  */
            if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
              return ftrace_new_tailcall (btinfo, mfun, fun);

            break;
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (btinfo, mfun, fun);
    }

  return bfun;
}

/* Add INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
{
  bfun->insn.push_back (insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
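
  /* Classifying the instruction may require reading target memory at PC.
     If that fails, the instruction stays classified as BTRACE_INSN_OTHER.  */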
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}

/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  BTINFO is the branch trace information for the current thread.  */
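
/* For example (derived from the loop below): if the caller chain of LHS is
   c -> b -> a and the caller chain of RHS is c -> b, the walk matches c
   against c and b against b, stops when RHS runs out of callers, and
   returns 2.  A function mismatch at any level returns 0 instead.  */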

static int
ftrace_match_backtrace (struct btrace_thread_info *btinfo,
			struct btrace_function *lhs,
			struct btrace_function *rhs)
{
  int matches;

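  /* Walk both caller chains in lockstep, counting one match per level until
     either chain ends; bail out with zero on the first function mismatch.  */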
  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
	return 0;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);
    }

  return matches;
}

/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
   BTINFO is the branch trace information for the current thread.  */

static void
ftrace_fixup_level (struct btrace_thread_info *btinfo,
		    struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

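  /* Function segments are numbered in trace order, so stepping to
     bfun->number + 1 visits every segment that follows BFUN; the level
     adjustment therefore applies to BFUN and everything after it.  */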
  while (bfun != NULL)
    {
      bfun->level += adjustment;
      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
    }
}

/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  int level = INT_MAX;

  if (btinfo == NULL)
    return;

  if (btinfo->functions.empty ())
    return;

  unsigned int length = btinfo->functions.size() - 1;
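  /* LENGTH excludes the last function segment; it is handled separately
     below since it may contain only the current instruction.  */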
  for (unsigned int i = 0; i < length; ++i)
    level = std::min (level, btinfo->functions[i].level);
/* The last function segment contains the current instruction, which is not
really part of the trace. If it contains just this one instruction, we
ignore the segment. */
struct btrace_function *last = &btinfo->functions.back();
if (last->insn.size () != 1)
level = std::min (level, last->level);
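  /* Use the negated minimum as the global level offset so the smallest
     segment level is normalized to zero.  */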
DEBUG_FTRACE ("setting global level offset: %d", -level);
btinfo->level = -level;
}
/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
ftrace_connect_backtrace. BTINFO is the branch trace information for the
current thread. */
static void
ftrace_connect_bfun (struct btrace_thread_info *btinfo,
struct btrace_function *prev,
struct btrace_function *next)
{
DEBUG_FTRACE ("connecting...");
ftrace_debug (prev, "..prev");
ftrace_debug (next, "..next");
/* The function segments are not yet connected. */
gdb_assert (prev->next == 0);
gdb_assert (next->prev == 0);
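  /* Link PREV and NEXT as adjacent segments of the same function.  */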
prev->next = next->number;
next->prev = prev->number;
/* We may have moved NEXT to a different function level. */
ftrace_fixup_level (btinfo, next, prev->level - next->level);
/* If we run out of back trace for one, let's use the other's. */
if (prev->up == 0)
{
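      /* PREV has no caller; borrow NEXT's caller and its call flags for PREV
	 so the back trace continues across the gap.  */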
const btrace_function_flags flags = next->flags;
next = ftrace_find_call_by_number (btinfo, next->up);
if (next != NULL)
{
DEBUG_FTRACE ("using next's callers");
ftrace_fixup_caller (btinfo, prev, next, flags);
        }
    }
  else if (next->up == 0)
    {
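      /* NEXT has no more back trace; borrow PREV's callers instead.  */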
      const btrace_function_flags flags = prev->flags;

      prev = ftrace_find_call_by_number (btinfo, prev->up);
      if (prev != NULL)
        {
          DEBUG_FTRACE ("using prev's callers");
          ftrace_fixup_caller (btinfo, next, prev, flags);
        }
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
         link to add the tail callers to NEXT's back trace.

         This removes NEXT->UP from NEXT's back trace.  It will be added back
         when connecting NEXT and PREV's callers - provided they exist.

         If PREV's back trace consists of a series of tail calls without an
         actual call, there will be no further connection and NEXT's caller
         will be removed for good.  To catch this case, we handle it here and
         connect the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        {
          struct btrace_function *caller;
          btrace_function_flags next_flags, prev_flags;
          /* We checked NEXT->UP above so CALLER can't be NULL.  */
          caller = ftrace_find_call_by_number (btinfo, next->up);
          next_flags = next->flags;
          prev_flags = prev->flags;
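          /* Saved up front: NEXT's flags are updated when its up link is
             fixed up below, and PREV itself is reassigned as we walk up its
             back trace.  */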
DEBUG_FTRACE ("adding prev's tail calls to next");
prev = ftrace_find_call_by_number (btinfo, prev->up);
ftrace_fixup_caller (btinfo, next, prev, prev_flags);
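          /* Walk up PREV's back trace.  If it ends in a chain of tail calls
             without a real call, reconnect CALLER at its top so NEXT's
             original caller is not lost.  */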
          for (; prev != NULL;
               prev = ftrace_find_call_by_number (btinfo, prev->up))
            {
              /* At the end of PREV's back trace, continue with CALLER.  */
              if (prev->up == 0)
                {
                  DEBUG_FTRACE ("fixing up link for tailcall chain");
                  ftrace_debug (prev, "..top");
                  ftrace_debug (caller, "..up");

                  ftrace_fixup_caller (btinfo, prev, caller, next_flags);
                  /* If we skipped any tail calls, this may move CALLER to a
                     different function level.

                     Note that changing CALLER's level is only OK because we
                     know that this is the last iteration of the bottom-to-top
                     walk in ftrace_connect_backtrace.

                     Otherwise we will fix up CALLER's level when we connect it
                     to PREV's caller in the next iteration.  */
                  ftrace_fixup_level (btinfo, caller,
                                      prev->level - caller->level - 1);
                  break;
                }

              /* There's nothing to do if we find a real call.  */
              if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
                {
                  DEBUG_FTRACE ("will fix up link in next iteration");
                  break;
                }
            }
        }
    }
}

/* Connect function segments on the same level in the back trace at LHS and
   RHS.  The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
                          struct btrace_function *lhs,
                          struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);
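
      /* Connect the current pair; the loop then continues with the callers
         fetched above.  */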
      ftrace_connect_bfun (btinfo, prev, next);
    }
}

/* Bridge the gap between two function segments left and right of a gap if
   their respective back traces match in at least MIN_MATCHES functions.
   BTINFO is the branch trace information for the current thread.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_thread_info *btinfo,
                   struct btrace_function *lhs, struct btrace_function *rhs,
                   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
                rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL;
       cand_l = ftrace_get_caller (btinfo, cand_l))
    for (cand_r = rhs; cand_r != NULL;
         cand_r = ftrace_get_caller (btinfo, cand_r))
      {
        int matches;

        matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
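        /* Remember the candidate pair that yields the longest matching back
           trace.  */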
	if (best_matches < matches)
	  {
	    best_matches = matches;
	    best_l = cand_l;
	    best_r = cand_r;
	  }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (btinfo, best_l, best_r);

  return best_matches;
}

/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */
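/* Rationale (condensed from the commit message that introduced this code):
   assume the back trace before the gap ends in a -> b -> c and the trace
   after the gap resumes in c and later continues through d and e.  Even
   though we cannot know how the program got there, b and c are likely the
   same function instances before and after the gap, so we connect the two
   c (and thereby b) segments; this adds a to the back trace of b on the
   right-hand side.  This is a heuristic: it works best when the gap is
   small and the trace pieces are long, but it may also produce wrong
   connections.  */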
static void
btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo = &tp->btrace;
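  /* Presumably used to collect the gaps that could not be bridged in the
     current pass so they can be retried later, either after neighbouring
     gaps have been closed or with a lower match requirement.  */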
  std::vector<unsigned int> remaining;
  int min_matches;

  DEBUG ("bridge gaps");

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.
     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
	 skip a gap and revisit it again after we closed later gaps.  */
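      /* Bridging one gap extends the back traces on both of its sides, which
	 may give a neighbouring gap enough matches to be bridged on a later
	 visit; hence the repeated passes below.  */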
      while (!gaps.empty ())
	{
	  for (const unsigned int number : gaps)
	    {
	      struct btrace_function *gap, *lhs, *rhs;
	      int bridged;

	      gap = ftrace_find_call_by_number (btinfo, number);

	      /* We may have a sequence of gaps if we run from one error into
		 the next as we try to re-sync onto the trace stream.  Ignore
		 all but the leftmost gap in such a sequence.
		 Also ignore gaps at the beginning of the trace.  */
	      lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
if (lhs == NULL || lhs->errcode != 0)
continue;
/* Skip gaps to the right. */
rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
while (rhs != NULL && rhs->errcode != 0)
rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
/* Ignore gaps at the end of the trace. */
if (rhs == NULL)
continue;
bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
/* Keep track of gaps we were not able to bridge and try again.
If we just pushed them to the end of GAPS we would risk an
infinite loop in case we simply cannot bridge a gap. */
if (bridged == 0)
remaining.push_back (number);
}
/* Let's see if we made any progress. */
if (remaining.size () == gaps.size ())
break;
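/* We made progress: try again with the remaining gaps.  */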
gaps.clear ();
gaps.swap (remaining);
}
/* We get here if either GAPS is empty or if GAPS equals REMAINING. */
if (gaps.empty ())
break;
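/* GAPS still holds the gaps we could not bridge.  Clear REMAINING before
   the next attempt.  */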
remaining.clear ();
}
/* We may omit this in some cases. Not sure it is worth the extra
complication, though. */
ftrace_compute_global_level_offset (btinfo);
}
/* Compute the function branch trace from BTS trace. */
static void
btrace_compute_ftrace_bts (struct thread_info *tp,
const struct btrace_data_bts *btrace,
std::vector<unsigned int> &gaps)
{
struct btrace_thread_info *btinfo;
struct gdbarch *gdbarch;
unsigned int blk;
int level;
gdbarch = target_gdbarch ();
btinfo = &tp->btrace;
blk = VEC_length (btrace_block_s, btrace->blocks);
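
  /* LEVEL tracks the smallest function level seen while decoding; it is
     used afterwards to normalize function levels so they start at zero.  */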
if (btinfo->functions.empty ())
level = INT_MAX;
else
level = -btinfo->level;
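
  /* Blocks are stored with the most recent block first; process them
     back to front so the trace is walked in chronological order.  */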
while (blk != 0)
{
btrace_block_s *block;
CORE_ADDR pc;
blk -= 1;
block = VEC_index (btrace_block_s, btrace->blocks, blk);
pc = block->begin;
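
      /* Decode the instructions of this block one at a time, from
	 BLOCK->BEGIN through BLOCK->END.  */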
for (;;)
{
struct btrace_function *bfun;
struct btrace_insn insn;
int size;
/* We should hit the end of the block. Warn if we went too far. */
if (block->end < pc)
{
/* Indicate the gap in the trace. */
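	      /* The gap is also recorded in GAPS so that the trace pieces
		 on either side may be bridged later.  */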
bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);
warning (_("Recorded trace may be corrupted at instruction "
"%u (pc = %s)."), bfun->insn_offset - 1,
core_addr_to_string_nz (pc));
break;
}
bfun = ftrace_update_function (btinfo, pc);
/* Maintain the function level offset.
For all but the last block, we do it here. */
if (blk != 0)
level = std::min (level, bfun->level);
size = 0;
TRY
{
size = gdb_insn_length (gdbarch, pc);
}
CATCH (error, RETURN_MASK_ERROR)
{
}
END_CATCH
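/* If the disassembler failed, SIZE is still zero; the "size <= 0" check
   further below then records this as a gap in the trace. */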
insn.pc = pc;
insn.size = size;
insn.iclass = ftrace_classify_insn (gdbarch, pc);
insn.flags = 0;
ftrace_update_insns (bfun, insn);
/* We're done once we have pushed the instruction at the end of the block. */
if (block->end == pc)
break;
/* We can't continue if we fail to compute the size. */
if (size <= 0)
{
/* Indicate the gap in the trace. We just added INSN so we're
not at the beginning. */
bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);
warning (_("Recorded trace may be incomplete at instruction %u "
"(pc = %s)."), bfun->insn_offset - 1,
core_addr_to_string_nz (pc));
break;
}
pc += size;
/* Maintain the function level offset.
For the last block, we do it here to exclude the last instruction.
Since the last instruction corresponds to the current instruction
and is not really part of the execution history, it should not
affect the level. */
if (blk == 0)
level = std::min (level, bfun->level);
}
}
/* LEVEL is the minimal function level of all btrace function segments.
Define the global level offset to -LEVEL so all function levels are
normalized to start at zero. */
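/* For example, if the trace returned two levels above the function in
   which recording started, the minimal level is -2, the offset becomes 2,
   and the outermost recorded function segment is normalized to level 0. */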
btinfo->level = -level;
}
#if defined (HAVE_LIBIPT)
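/* Translate a libipt instruction class into the corresponding btrace
   instruction class. Classes that are not modeled explicitly map to
   BTRACE_INSN_OTHER. */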
static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
switch (iclass)
{
case ptic_call:
return BTRACE_INSN_CALL;
case ptic_return:
return BTRACE_INSN_RETURN;
case ptic_jump:
return BTRACE_INSN_JUMP;
default:
return BTRACE_INSN_OTHER;
}
}
/* Return the btrace instruction flags for INSN. */
static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
btrace_insn_flags flags = 0;
if (insn.speculative)
flags |= BTRACE_INSN_FLAG_SPECULATIVE;
return flags;
}
/* Return the btrace instruction for INSN. */
static btrace_insn
pt_btrace_insn (const struct pt_insn &insn)
{
return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
pt_reclassify_insn (insn.iclass),
pt_btrace_insn_flags (insn)};
}
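/* A rough sketch of how these converters are meant to be used by the
   decode loop (an illustration, not a quote of the actual code):

     bfun = ftrace_update_function (btinfo, insn.ip);
     ftrace_update_insns (bfun, pt_btrace_insn (insn));

   i.e. each pt_insn returned by the decoder is converted into a
   btrace_insn and appended to the current function segment. */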
/* Handle instruction decode events (libipt-v2). */
static int
handle_pt_insn_events (struct btrace_thread_info *btinfo,
struct pt_insn_decoder *decoder,
std::vector<unsigned int> &gaps, int status)
{
#if defined (HAVE_PT_INSN_EVENT)
while (status & pts_event_pending)
{
struct btrace_function *bfun;
struct pt_event event;
uint64_t offset;
status = pt_insn_event (decoder, &event, sizeof (event));
if (status < 0)
break;
switch (event.type)
{
default:
break;
case ptev_enabled:
if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
{
bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
pt_insn_get_offset (decoder, &offset);
warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
PRIx64 ")."), bfun->insn_offset - 1, offset);
}
break;
case ptev_overflow:
bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
pt_insn_get_offset (decoder, &offset);
warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
bfun->insn_offset - 1, offset);
break;
}
}
#endif /* defined (HAVE_PT_INSN_EVENT) */
return status;
}
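/* A sketch of the expected caller usage (illustrative only): the status
   returned above is fed back into the decode loop, and a negative status
   ends decoding at the current synchronization point, roughly:

     status = handle_pt_insn_events (btinfo, decoder, gaps, status);
     if (status < 0)
       break;  */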
/* Handle events indicated by flags in INSN (libipt-v1). */
static void
handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
struct pt_insn_decoder *decoder,
const struct pt_insn &insn,
std::vector<unsigned int> &gaps)
{
#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
/* Tracing is disabled and re-enabled each time we enter the kernel. Most
of the time, we continue from the same instruction at which we stopped.
This is indicated via the RESUMED instruction flag. The ENABLED instruction
flag means that we continued from some other instruction. Indicate this as
a trace gap, except when tracing has just started. */
if (insn.enabled && !btinfo->functions.empty ())
{
struct btrace_function *bfun;
uint64_t offset;
bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
pt_insn_get_offset (decoder, &offset);
warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
insn.ip);
}
#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */
#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
/* Indicate trace overflows. */
if (insn.resynced)
{
struct btrace_function *bfun;
uint64_t offset;
bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
pt_insn_get_offset (decoder, &offset);
warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
}
#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
}
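/* Both the libipt-v2 path (handle_pt_insn_events) and the libipt-v1 path
   above report gaps through ftrace_new_gap with BDE_PT_DISABLED or
   BDE_PT_OVERFLOW, so later consumers of the trace see the same kind of
   gap regardless of the libipt version in use. */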
/* Add function branch trace to BTINFO using DECODER. */
static void
ftrace_add_pt (struct btrace_thread_info *btinfo,
struct pt_insn_decoder *decoder,
int *plevel,
std::vector<unsigned int> &gaps)
{
struct btrace_function *bfun;
uint64_t offset;
int status;
for (;;)
{
struct pt_insn insn;
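      /* Synchronize the decoder onto the trace stream at the next
         synchronization point.  */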
status = pt_insn_sync_forward (decoder);
if (status < 0)
{
if (status != -pte_eos)
warning (_("Failed to synchronize onto the Intel Processor "
"Trace stream: %s."), pt_errstr (pt_errcode (status)));
break;
}
for (;;)
{
/* Handle events from the previous iteration or synchronization. */
status = handle_pt_insn_events (btinfo, decoder, gaps, status);
if (status < 0)
break;
status = pt_insn_next (decoder, &insn, sizeof(insn));
if (status < 0)
break;
/* Handle events indicated by flags in INSN. */
handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);
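        /* Update the current function segment for the instruction's address,
           starting a new segment where the control flow requires it.  */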
bfun = ftrace_update_function (btinfo, insn.ip);
/* Maintain the function level offset. */
*plevel = std::min (*plevel, bfun->level);
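        /* Convert the libipt instruction into GDB's btrace instruction
           representation and append it to the current function segment.  */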
btrace_insn btinsn = pt_btrace_insn (insn);
ftrace_update_insns (bfun, &btinsn);
}
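      /* An end-of-stream status means we consumed the entire trace; any
         other error is recorded as a gap below.  */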
if (status == -pte_eos)
break;
/* Indicate the gap in the trace. */
bfun = ftrace_new_gap (btinfo, status, gaps);
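      /* Query the decoder's current offset into the trace buffer for the
         warning below.  */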
pt_insn_get_offset (decoder, &offset);
warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
offset, insn.ip, pt_errstr (pt_errcode (status)));
}
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */
static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
const struct pt_asid *asid, uint64_t pc,
void *context)
{
int result, errcode;
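  /* On success, the callback returns the number of bytes read.  Default to
     the full SIZE.  */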
result = (int) size;
TRY
{
errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
if (errcode != 0)
result = -pte_nomap;
}
CATCH (error, RETURN_MASK_ERROR)
{
result = -pte_nomap;
}
END_CATCH
return result;
}
/* Translate the vendor from one enum to another. */
static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
switch (vendor)
{
default:
return pcv_unknown;
case CV_INTEL:
return pcv_intel;
}
}
/* Finalize the function branch trace after decode. */
static void
btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
struct thread_info *tp, int level)
{
pt_insn_free_decoder (decoder);
/* LEVEL is the minimal function level of all btrace function segments.
Define the global level offset to -LEVEL so all function levels are
normalized to start at zero. */
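  /* For example, if the decoded function segments span levels -2..1, LEVEL
     is -2 and the offset becomes 2, so the segments are reported at
     normalized levels 0..3.  */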
tp->btrace.level = -level;
/* Add a single last instruction entry for the current PC.
This allows us to compute the backtrace at the current PC using both
standard unwind and btrace unwind.
This extra entry is ignored by all record commands. */
btrace_add_pc (tp);
}
/* Compute the function branch trace from Intel Processor Trace
format. */
static void
btrace_compute_ftrace_pt (struct thread_info *tp,
const struct btrace_data_pt *btrace,
std::vector<unsigned int> &gaps)
{
struct btrace_thread_info *btinfo;
struct pt_insn_decoder *decoder;
struct pt_config config;
int level, errcode;
if (btrace->size == 0)
return;
btinfo = &tp->btrace;
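  /* Start from INT_MAX for the first decode so the new trace defines the
     minimal function level; otherwise continue from the minimal level of
     the existing trace.  */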
if (btinfo->functions.empty ())
level = INT_MAX;
else
level = -btinfo->level;
pt_config_init(&config);
config.begin = btrace->data;
config.end = btrace->data + btrace->size;
config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
config.cpu.family = btrace->config.cpu.family;
config.cpu.model = btrace->config.cpu.model;
config.cpu.stepping = btrace->config.cpu.stepping;
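  /* The CPU identification above allows libipt to apply workarounds for
     known processor errata.  */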
errcode = pt_cpu_errata (&config.errata, &config.cpu);
if (errcode < 0)
error (_("Failed to configure the Intel Processor Trace decoder: %s."),
pt_errstr (pt_errcode (errcode)));
decoder = pt_insn_alloc_decoder (&config);
if (decoder == NULL)
error (_("Failed to allocate the Intel Processor Trace decoder."));
TRY
{
struct pt_image *image;
image = pt_insn_get_image(decoder);
if (image == NULL)
error (_("Failed to configure the Intel Processor Trace decoder."));
errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
if (errcode < 0)
error (_("Failed to configure the Intel Processor Trace decoder: "
"%s."), pt_errstr (pt_errcode (errcode)));
ftrace_add_pt (btinfo, decoder, &level, gaps);
}
CATCH (error, RETURN_MASK_ALL)
{
/* Indicate a gap in the trace if we quit trace processing. */
if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);
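/* Finalize whatever trace we managed to decode before re-raising the
   error, so the partial trace remains available.  */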
btrace_finalize_ftrace_pt (decoder, tp, level);
throw_exception (error);
}
END_CATCH
btrace_finalize_ftrace_pt (decoder, tp, level);
}
#else /* defined (HAVE_LIBIPT) */
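/* Without libipt (HAVE_LIBIPT undefined), Intel PT trace cannot be decoded;
   provide a stub that reports an internal error if it is ever reached.  */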
static void
btrace_compute_ftrace_pt (struct thread_info *tp,
const struct btrace_data_pt *btrace,
std::vector<unsigned int> &gaps)
{
internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}
#endif /* defined (HAVE_LIBIPT) */
/* Compute the function branch trace from the branch trace data BTRACE for
the thread TP, collecting trace gaps in GAPS.  */
static void
btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
std::vector<unsigned int> &gaps)
{
DEBUG ("compute ftrace");
switch (btrace->format)
{
case BTRACE_FORMAT_NONE:
return;
case BTRACE_FORMAT_BTS:
btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
return;
case BTRACE_FORMAT_PT:
btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
return;
}
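/* Each case above returns; falling out of the switch means BTRACE->format
   holds a value we do not handle.  */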
internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
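/* Finalize the function branch trace of TP after decode.  GAPS holds the
   trace gaps collected during decode; if there are any, we try to bridge
   them, heuristically reconnecting function segments across each gap.  */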
static void
btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  if (!gaps.empty ())
    {
      tp->btrace.ngaps += gaps.size ();

      btrace_bridge_gaps (tp, gaps);
    }
}
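
/* Compute the function-level branch trace for TP from the branch trace
   data BTRACE.  Gaps encountered along the way are collected and bridged
   afterwards, even if the computation itself throws an error.  */
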
static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  std::vector<unsigned int> gaps;

  TRY
    {
      btrace_compute_ftrace_1 (tp, btrace, gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
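      /* Finalize whatever trace we computed so far before re-throwing.  */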
      btrace_finalize_ftrace (tp, gaps);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace (tp, gaps);
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);
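
  /* A single block that begins and ends at the current PC describes exactly
     one instruction: the one TP is currently stopped at.  */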
  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
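  /* If branch tracing is already enabled for TP, there is nothing to do.  */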
  if (tp->btrace.target != NULL)
    return;

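  /* Decoding Intel Processor Trace requires libipt, which is an optional
     dependency at build time.  */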
#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

if (!target_supports_btrace (conf->format))
error (_("Target does not support branch tracing."));
DEBUG ("enable thread %s (%s)", print_thread_id (tp),
target_pid_to_str (tp->ptid));
tp->btrace.target = target_enable_btrace (tp->ptid, conf);
/* We're done if we failed to enable tracing. */
if (tp->btrace.target == NULL)
return;
/* We need to undo the enable in case of errors. */
TRY
{
/* Add an entry for the current PC so we start tracing from where we
enabled it.
If we can't access TP's registers, TP is most likely running. In this
case, we can't really say where tracing was enabled so it should be
safe to simply skip this step.
This is not relevant for BTRACE_FORMAT_PT since the trace will already
start at the PC at which tracing was enabled. */
if (conf->format != BTRACE_FORMAT_PT
&& can_access_registers_ptid (tp->ptid))
btrace_add_pc (tp);
}
CATCH (exception, RETURN_MASK_ALL)
{
btrace_disable (tp);
throw_exception (exception);
}
END_CATCH
}
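/* Usage sketch (illustrative only; not code from this file): a record
   target fills in a btrace_config and enables tracing for each non-exited
   thread.  If adding the initial PC fails, btrace_enable itself rolls the
   enable back via the TRY/CATCH above.  ALL_NON_EXITED_THREADS and
   BTRACE_FORMAT_BTS are existing GDB names; this is a rough sketch of what
   record-btrace.c does when "record btrace" is started, not a copy of it.

     struct btrace_config conf;
     struct thread_info *tp;

     memset (&conf, 0, sizeof (conf));
     conf.format = BTRACE_FORMAT_BTS;

     ALL_NON_EXITED_THREADS (tp)
       btrace_enable (tp, &conf);
*/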
/* See btrace.h. */
const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
if (btinfo->target == NULL)
return NULL;
return target_btrace_conf (btinfo->target);
}
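/* Illustrative caller (a sketch modeled on the "info record" handling in
   record-btrace.c, not copied from it): the returned pointer is NULL when
   tracing is not enabled for the thread, so callers check before using it.

     const struct btrace_config *conf = btrace_conf (&tp->btrace);

     if (conf != NULL)
       printf_filtered (_("Recording format: %s.\n"),
                        btrace_format_string (conf->format));
*/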
/* See btrace.h. */
void
btrace_disable (struct thread_info *tp)
{
struct btrace_thread_info *btp = &tp->btrace;
int errcode = 0;
if (btp->target == NULL)
return;
DEBUG ("disable thread %s (%s)", print_thread_id (tp),
target_pid_to_str (tp->ptid));
target_disable_btrace (btp->target);
btp->target = NULL;
btrace_clear (tp);
}
/* See btrace.h. */
void
btrace_teardown (struct thread_info *tp)
{
struct btrace_thread_info *btp = &tp->btrace;
int errcode = 0;
if (btp->target == NULL)
return;
DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
target_pid_to_str (tp->ptid));
target_teardown_btrace (btp->target);
btp->target = NULL;
btrace_clear (tp);
}
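/* Note, summarizing the btrace.h comments: btrace_teardown mirrors
   btrace_disable but is used while the target itself is being torn down,
   where errors from the target side are not treated as fatal.  Both paths
   finish with btrace_clear so the thread's computed branch trace is
   discarded.  */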
/* Stitch branch trace in BTS format. */
static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
struct btrace_thread_info *btinfo;
struct btrace_function *last_bfun;
btrace_block_s *first_new_block;
btinfo = &tp->btrace;
gdb_assert (!btinfo->functions.empty ());
gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
last_bfun = &btinfo->functions.back ();
/* If the existing trace ends with a gap, we just glue the traces
together. We need to drop the last (i.e. chronologically first) block
of the new trace, though, since we can't fill in the start address.  */
if (last_bfun->insn.empty ())
{
VEC_pop (btrace_block_s, btrace->blocks);
return 0;
}
/* Beware that block trace starts with the most recent block, so the
chronologically first block in the new trace is the last block in
the new trace's block vector. */
first_new_block = VEC_last (btrace_block_s, btrace->blocks);
const btrace_insn &last_insn = last_bfun->insn.back ();
/* If the current PC at the end of the block is the same as in our current
trace, there are two explanations:
1. we executed the instruction and some branch brought us back.
2. we have not made any progress.
In the first case, the delta trace vector should contain at least two
entries.
In the second case, the delta trace vector should contain exactly one
entry for the partial block containing the current PC. Remove it. */
if (first_new_block->end == last_insn.pc
&& VEC_length (btrace_block_s, btrace->blocks) == 1)
{
VEC_pop (btrace_block_s, btrace->blocks);
return 0;
}
DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
core_addr_to_string_nz (first_new_block->end));
/* Do a simple sanity check to make sure we don't accidentally end up
with a bad block. This should not occur in practice. */
if (first_new_block->end < last_insn.pc)
{
warning (_("Error while trying to read delta trace. Falling back to "
"a full read."));
return -1;
}
/* We adjust the last block to start at the end of our current trace. */
gdb_assert (first_new_block->begin == 0);
first_new_block->begin = last_insn.pc;
/* We simply pop the last insn so we can insert it again as part of
the normal branch trace computation.
Since instruction iterators are based on indices in the instructions
vector, we don't leave any pointers dangling. */
DEBUG ("pruning insn at %s for stitching",
ftrace_print_insn_addr (&last_insn));
last_bfun->insn.pop_back ();
/* The instructions vector may become empty temporarily if this has
been the only instruction in this function segment.
This violates the invariant but will be remedied shortly by
btrace_compute_ftrace when we add the new trace. */
/* The only case where this would hurt is if the entire trace consisted
of just that one instruction. If we remove it, we might turn the now
empty btrace function segment into a gap. But we don't want gaps at
the beginning. To avoid this, we remove the entire old trace. */
if (last_bfun->number == 1 && last_bfun->insn.empty ())
btrace_clear (tp);
return 0;
}
/* Adjust the block trace in order to stitch old and new trace together.
BTRACE is the new delta trace between the last and the current stop.
TP is the traced thread.
May modify BTRACE as well as the existing trace in TP.
Return 0 on success, -1 otherwise. */
static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
/* If we don't have trace, there's nothing to do. */
if (btrace_data_empty (btrace))
return 0;
switch (btrace->format)
{
case BTRACE_FORMAT_NONE:
return 0;
case BTRACE_FORMAT_BTS:
return btrace_stitch_bts (&btrace->variant.bts, tp);
case BTRACE_FORMAT_PT:
/* Delta reads are not supported. */
return -1;
}
internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
/* Clear the branch trace histories in BTINFO. */
static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
xfree (btinfo->insn_history);
xfree (btinfo->call_history);
xfree (btinfo->replay);
btinfo->insn_history = NULL;
btinfo->call_history = NULL;
btinfo->replay = NULL;
}
/* Clear the branch trace maintenance histories in BTINFO. */
static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
switch (btinfo->data.format)
{
default:
break;
case BTRACE_FORMAT_BTS:
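/* For BTS there is nothing to free; only the packet history range needs
to be reset.  */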
btinfo->maint.variant.bts.packet_history.begin = 0;
btinfo->maint.variant.bts.packet_history.end = 0;
break;
#if defined (HAVE_LIBIPT)
case BTRACE_FORMAT_PT:
xfree (btinfo->maint.variant.pt.packets);
btinfo->maint.variant.pt.packets = NULL;
btinfo->maint.variant.pt.packet_history.begin = 0;
btinfo->maint.variant.pt.packet_history.end = 0;
break;
#endif /* defined (HAVE_LIBIPT) */
}
}
/* See btrace.h. */
const char *
btrace_decode_error (enum btrace_format format, int errcode)
{
switch (format)
{
case BTRACE_FORMAT_BTS:
switch (errcode)
{
case BDE_BTS_OVERFLOW:
return _("instruction overflow");
case BDE_BTS_INSN_SIZE:
return _("unknown instruction");
default:
break;
}
break;
#if defined (HAVE_LIBIPT)
case BTRACE_FORMAT_PT:
switch (errcode)
{
case BDE_PT_USER_QUIT:
return _("trace decode cancelled");
case BDE_PT_DISABLED:
return _("disabled");
case BDE_PT_OVERFLOW:
return _("overflow");
default:
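/* Negative error codes come from libipt; let libipt translate them into
a human-readable string.  */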
if (errcode < 0)
return pt_errstr (pt_errcode (errcode));
break;
}
break;
#endif /* defined (HAVE_LIBIPT) */
default:
break;
}
return _("unknown");
}
/* See btrace.h. */
void
btrace_fetch (struct thread_info *tp)
{
struct btrace_thread_info *btinfo;
struct btrace_target_info *tinfo;
struct btrace_data btrace;
struct cleanup *cleanup;
int errcode;
  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* With CLI usage, TP->PTID always equals INFERIOR_PTID here.  Now that we
     can store a gdb.Record object in Python referring to a different thread
     than the current one, temporarily set INFERIOR_PTID.  */
  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = tp->ptid;
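  /* INFERIOR_PTID is restored automatically when SAVE_INFERIOR_PTID goes
     out of scope at the end of this function.  */
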
  /* We should not be called on running or exited threads.  */
  gdb_assert (can_access_registers_ptid (tp->ptid));

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

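  /* Reading strategy below: try a BTRACE_READ_DELTA first and stitch the
     result onto the trace we already have.  If that fails, fall back to
     BTRACE_READ_NEW, discarding the old trace if any new data arrived.  If
     we still could not read, start over with a full BTRACE_READ_ALL.  */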
  /* Let's first try to extend the trace we already have.  */
  if (!btinfo->functions.empty ())
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !btrace_data_empty (&btrace))
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data.  The stored data will be cleared in
         btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}

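/* For illustration, a caller such as record-btrace.c first fetches the trace
   (require_btrace) and then walks it with the instruction iterator
   (btrace_insn_history), along the lines of

     struct btrace_insn_iterator it, end;

     btrace_fetch (tp);
     btrace_insn_begin (&it, &tp->btrace);
     btrace_insn_end (&end, &tp->btrace);

     for (; btrace_insn_cmp (&it, &end) != 0; btrace_insn_next (&it, 1))
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);
         ...
       }

   where btrace_insn_get returns NULL if the iterator points to a gap.  */
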
/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  btinfo->functions.clear ();
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}
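
/* Illustrative sketch, not part of this file: ALL_NON_EXITED_THREADS is
   described as walking the global thread list while skipping threads that
   have already exited, roughly along the lines of

     for (tp = thread_list; tp != NULL; tp = tp->next)
       if (tp->state != THREAD_EXITED)
         btrace_clear (tp);

   The exact macro definition lives in gdbthread.h; the expansion above is
   only an approximation showing why exited threads are never cleared.  */
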
#if defined (HAVE_LIBEXPAT)
/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}
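
/* For illustration only: the version attribute checked above is carried on
   the top-level btrace element of the XML document supplied by the target,
   for example

     <btrace version="1.0">
       ...block or pt records...
     </btrace>

   The element and attribute names follow features/btrace.dtd; the document
   shown here is a made-up example, not real target output.  */
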
/* Parse a btrace "block" xml record. */
static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
const struct gdb_xml_element *element,
void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}
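
/* Illustrative example (addresses made up): a record such as

     <block begin="0x400500" end="0x400510"/>

   is handled above by appending one btrace_block to the BTS block vector
   of the btrace_data being filled in, with its BEGIN and END fields taken
   from the two attributes.  */
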
/* Parse a "raw" xml record. */
static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
gdb_byte **pdata, size_t *psize)
{
struct cleanup *cleanup;
gdb_byte *data, *bin;
size_t len, size;
len = strlen (body_text);
if (len % 2 != 0)
gdb_xml_error (parser, _("Bad raw data size."));
size = len / 2;
bin = data = (gdb_byte *) xmalloc (size);
cleanup = make_cleanup (xfree, data);
/* We use hex encoding - see common/rsp-low.h. */
while (len > 0)
{
char hi, lo;
hi = *body_text++;
lo = *body_text++;
if (hi == 0 || lo == 0)
gdb_xml_error (parser, _("Bad hex encoding."));
*bin++ = fromhex (hi) * 16 + fromhex (lo);
len -= 2;
}
discard_cleanups (cleanup);
*pdata = data;
*psize = size;
}
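
/* A minimal illustrative sketch (not part of the original sources): the raw
   record body uses the same two-hex-digits-per-byte encoding as the remote
   protocol, so the text "0a1b2c" decodes to the bytes 0x0a, 0x1b, 0x2c.
   The hypothetical helper below only mirrors the decoding loop in
   parse_xml_raw above, using fromhex from common/rsp-low.h.  */

static void
parse_xml_raw_example (void)
{
  const char body[] = "0a1b2c";
  gdb_byte bytes[3];
  size_t i;

  /* Decode two hex characters into one byte, exactly as parse_xml_raw
     does for each pair of body characters.  */
  for (i = 0; i < sizeof (bytes); ++i)
    bytes[i] = fromhex (body[2 * i]) * 16 + fromhex (body[2 * i + 1]);

  /* BYTES now holds { 0x0a, 0x1b, 0x2c }.  */
}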
/* Parse a btrace pt-config "cpu" xml record. */
static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
const struct gdb_xml_element *element,
void *user_data,
VEC (gdb_xml_value_s) *attributes)
{
struct btrace_data *btrace;
const char *vendor;
ULONGEST *family, *model, *stepping;
  vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
  family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
  model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
  stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;

  btrace = (struct btrace_data *) user_data;
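
  /* Only the "GenuineIntel" vendor string is recognized here; any other
     vendor leaves the cpu vendor field at its default.  */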
  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}

/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
                         const struct gdb_xml_element *element,
                         void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
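
  /* Fill in the raw pt trace data and its size from the record's
     body text.  */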
parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
&btrace->variant.pt.size);
}
/* Parse a btrace "pt" xml record. */
static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
const struct gdb_xml_element *element,
void *user_data, VEC (gdb_xml_value_s) *attributes)
{
struct btrace_data *btrace;
btrace = (struct btrace_data *) user_data;
btrace->format = BTRACE_FORMAT_PT;
btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
btrace->variant.pt.data = NULL;
btrace->variant.pt.size = 0;
}
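/* The tables below mirror the btrace DTD: a <btrace> document contains
   either a sequence of <block> elements (BTS format) or a single <pt>
   element with an optional <pt-config>/<cpu> description and a <raw>
   payload.  */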
static const struct gdb_xml_attribute block_attributes[] = {
{ "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
{ "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
{ NULL, GDB_XML_AF_NONE, NULL, NULL }
};
static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
{ "vendor", GDB_XML_AF_NONE, NULL, NULL },
{ "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
{ "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
{ "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
{ NULL, GDB_XML_AF_NONE, NULL, NULL }
};
static const struct gdb_xml_element btrace_pt_config_children[] = {
{ "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
parse_xml_btrace_pt_config_cpu, NULL },
{ NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
static const struct gdb_xml_element btrace_pt_children[] = {
{ "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
NULL },
{ "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
{ NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
static const struct gdb_xml_attribute btrace_attributes[] = {
{ "version", GDB_XML_AF_NONE, NULL, NULL },
{ NULL, GDB_XML_AF_NONE, NULL, NULL }
};
static const struct gdb_xml_element btrace_children[] = {
{ "block", block_attributes, NULL,
GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
{ "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
NULL },
{ NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
static const struct gdb_xml_element btrace_elements[] = {
{ "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
check_xml_btrace_version, NULL },
{ NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
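/* For illustration only (an assumed example, not produced by this file):
   a document accepted by the tables above could look roughly like

     <btrace version="...">
       <pt>
         <pt-config>
           <cpu vendor="GenuineIntel" family="6" model="61" stepping="4"/>
         </pt-config>
         <raw>...encoded trace bytes...</raw>
       </pt>
     </btrace>

   where the version attribute is whatever check_xml_btrace_version accepts
   and the <raw> body carries the encoded trace buffer handed to
   parse_xml_raw.  */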
#endif /* defined (HAVE_LIBEXPAT) */
/* See btrace.h. */
void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
struct cleanup *cleanup;
int errcode;
#if defined (HAVE_LIBEXPAT)
btrace->format = BTRACE_FORMAT_NONE;
cleanup = make_cleanup_btrace_data (btrace);
errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
buffer, btrace);
if (errcode != 0)
error (_("Error parsing branch trace."));
/* Keep parse results. */
discard_cleanups (cleanup);
#else /* !defined (HAVE_LIBEXPAT) */
error (_("Cannot process branch trace. XML parsing is not supported."));
#endif /* !defined (HAVE_LIBEXPAT) */
}
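/* A minimal usage sketch (assumed caller, not part of this file); the
   buffer would typically hold the XML reply to a qXfer:btrace:read
   request:

     struct btrace_data data;

     btrace_data_init (&data);
     parse_xml_btrace (&data, xml_reply);
     ...examine data.format and data.variant...
     btrace_data_fini (&data);

   where xml_reply is a hypothetical NUL-terminated XML string.  */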
#if defined (HAVE_LIBEXPAT)
/* Parse a btrace-conf "bts" xml record. */
static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
const struct gdb_xml_element *element,
void *user_data, VEC (gdb_xml_value_s) *attributes)
{
struct btrace_config *conf;
struct gdb_xml_value *size;
(arm_normal_frame_base): Likewise. (gdb_print_insn_arm): Likewise. (arm_objfile_data_free): Likewise. (arm_record_special_symbol): Likewise. (value_of_arm_user_reg): Likewise. * armbsd-tdep.c (armbsd_supply_fpregset): Likewise. (armbsd_supply_gregset): Likewise. * auto-load.c (auto_load_pspace_data_cleanup): Likewise. (get_auto_load_pspace_data): Likewise. (hash_loaded_script_entry): Likewise. (eq_loaded_script_entry): Likewise. (clear_section_scripts): Likewise. (collect_matching_scripts): Likewise. * auxv.c (auxv_inferior_data_cleanup): Likewise. (get_auxv_inferior_data): Likewise. * avr-tdep.c (avr_frame_unwind_cache): Likewise. * ax-general.c (do_free_agent_expr_cleanup): Likewise. * bfd-target.c (target_bfd_xfer_partial): Likewise. (target_bfd_xclose): Likewise. (target_bfd_get_section_table): Likewise. * bfin-tdep.c (bfin_frame_cache): Likewise. * block.c (find_block_in_blockvector): Likewise. (call_site_for_pc): Likewise. (block_find_non_opaque_type_preferred): Likewise. * break-catch-sig.c (signal_catchpoint_insert_location): Likewise. (signal_catchpoint_remove_location): Likewise. (signal_catchpoint_breakpoint_hit): Likewise. (signal_catchpoint_print_one): Likewise. (signal_catchpoint_print_mention): Likewise. (signal_catchpoint_print_recreate): Likewise. * break-catch-syscall.c (get_catch_syscall_inferior_data): Likewise. * breakpoint.c (do_cleanup_counted_command_line): Likewise. (bp_location_compare_addrs): Likewise. (get_first_locp_gte_addr): Likewise. (check_tracepoint_command): Likewise. (do_map_commands_command): Likewise. (get_breakpoint_objfile_data): Likewise. (free_breakpoint_probes): Likewise. (do_captured_breakpoint_query): Likewise. (compare_breakpoints): Likewise. (bp_location_compare): Likewise. (bpstat_remove_breakpoint_callback): Likewise. (do_delete_breakpoint_cleanup): Likewise. * bsd-uthread.c (bsd_uthread_set_supply_uthread): Likewise. (bsd_uthread_set_collect_uthread): Likewise. (bsd_uthread_activate): Likewise. (bsd_uthread_fetch_registers): Likewise. (bsd_uthread_store_registers): Likewise. * btrace.c (check_xml_btrace_version): Likewise. (parse_xml_btrace_block): Likewise. (parse_xml_btrace_pt_config_cpu): Likewise. (parse_xml_btrace_pt_raw): Likewise. (parse_xml_btrace_pt): Likewise. (parse_xml_btrace_conf_bts): Likewise. (parse_xml_btrace_conf_pt): Likewise. (do_btrace_data_cleanup): Likewise. * c-typeprint.c (find_typedef_for_canonicalize): Likewise. * charset.c (cleanup_iconv): Likewise. (do_cleanup_iterator): Likewise. * cli-out.c (cli_uiout_dtor): Likewise. (cli_table_begin): Likewise. (cli_table_body): Likewise. (cli_table_end): Likewise. (cli_table_header): Likewise. (cli_begin): Likewise. (cli_end): Likewise. (cli_field_int): Likewise. (cli_field_skip): Likewise. (cli_field_string): Likewise. (cli_field_fmt): Likewise. (cli_spaces): Likewise. (cli_text): Likewise. (cli_message): Likewise. (cli_wrap_hint): Likewise. (cli_flush): Likewise. (cli_redirect): Likewise. (out_field_fmt): Likewise. (field_separator): Likewise. (cli_out_set_stream): Likewise. * cli/cli-cmds.c (compare_symtabs): Likewise. * cli/cli-dump.c (call_dump_func): Likewise. (restore_section_callback): Likewise. * cli/cli-script.c (clear_hook_in_cleanup): Likewise. (do_restore_user_call_depth): Likewise. (do_free_command_lines_cleanup): Likewise. * coff-pe-read.c (get_section_vmas): Likewise. (pe_as16): Likewise. (pe_as32): Likewise. * coffread.c (coff_symfile_read): Likewise. * common/agent.c (agent_look_up_symbols): Likewise. * common/filestuff.c (do_close_cleanup): Likewise. 
* common/format.c (free_format_pieces_cleanup): Likewise. * common/vec.c (vec_o_reserve): Likewise. * compile/compile-c-support.c (print_one_macro): Likewise. * compile/compile-c-symbols.c (hash_symbol_error): Likewise. (eq_symbol_error): Likewise. (del_symbol_error): Likewise. (error_symbol_once): Likewise. (gcc_convert_symbol): Likewise. (gcc_symbol_address): Likewise. (hash_symname): Likewise. (eq_symname): Likewise. * compile/compile-c-types.c (hash_type_map_instance): Likewise. (eq_type_map_instance): Likewise. (insert_type): Likewise. (convert_type): Likewise. * compile/compile-object-load.c (munmap_listp_free_cleanup): Likewise. (setup_sections): Likewise. (link_hash_table_free): Likewise. (copy_sections): Likewise. * compile/compile-object-run.c (do_module_cleanup): Likewise. * compile/compile.c (compile_print_value): Likewise. (do_rmdir): Likewise. (cleanup_compile_instance): Likewise. (cleanup_unlink_file): Likewise. * completer.c (free_completion_tracker): Likewise. * corelow.c (add_to_spuid_list): Likewise. * cp-namespace.c (reset_directive_searched): Likewise. * cp-support.c (reset_directive_searched): Likewise. * cris-tdep.c (cris_sigtramp_frame_unwind_cache): Likewise. (cris_frame_unwind_cache): Likewise. * d-lang.c (builtin_d_type): Likewise. * d-namespace.c (reset_directive_searched): Likewise. * dbxread.c (dbx_free_symfile_info): Likewise. (do_free_bincl_list_cleanup): Likewise. * disasm.c (hash_dis_line_entry): Likewise. (eq_dis_line_entry): Likewise. (dis_asm_print_address): Likewise. (fprintf_disasm): Likewise. (do_ui_file_delete): Likewise. * doublest.c (convert_floatformat_to_doublest): Likewise. * dummy-frame.c (pop_dummy_frame_bpt): Likewise. (dummy_frame_prev_register): Likewise. (dummy_frame_this_id): Likewise. * dwarf2-frame-tailcall.c (cache_hash): Likewise. (cache_eq): Likewise. (cache_find): Likewise. (tailcall_frame_this_id): Likewise. (dwarf2_tailcall_prev_register_first): Likewise. (tailcall_frame_prev_register): Likewise. (tailcall_frame_dealloc_cache): Likewise. (tailcall_frame_prev_arch): Likewise. * dwarf2-frame.c (dwarf2_frame_state_free): Likewise. (dwarf2_frame_set_init_reg): Likewise. (dwarf2_frame_init_reg): Likewise. (dwarf2_frame_set_signal_frame_p): Likewise. (dwarf2_frame_signal_frame_p): Likewise. (dwarf2_frame_set_adjust_regnum): Likewise. (dwarf2_frame_adjust_regnum): Likewise. (clear_pointer_cleanup): Likewise. (dwarf2_frame_cache): Likewise. (find_cie): Likewise. (dwarf2_frame_find_fde): Likewise. * dwarf2expr.c (dwarf_expr_address_type): Likewise. (free_dwarf_expr_context_cleanup): Likewise. * dwarf2loc.c (locexpr_find_frame_base_location): Likewise. (locexpr_get_frame_base): Likewise. (loclist_find_frame_base_location): Likewise. (loclist_get_frame_base): Likewise. (dwarf_expr_dwarf_call): Likewise. (dwarf_expr_get_base_type): Likewise. (dwarf_expr_push_dwarf_reg_entry_value): Likewise. (dwarf_expr_get_obj_addr): Likewise. (entry_data_value_coerce_ref): Likewise. (entry_data_value_copy_closure): Likewise. (entry_data_value_free_closure): Likewise. (get_frame_address_in_block_wrapper): Likewise. (dwarf2_evaluate_property): Likewise. (dwarf2_compile_property_to_c): Likewise. (needs_frame_read_addr_from_reg): Likewise. (needs_frame_get_reg_value): Likewise. (needs_frame_frame_base): Likewise. (needs_frame_frame_cfa): Likewise. (needs_frame_tls_address): Likewise. (needs_frame_dwarf_call): Likewise. (needs_dwarf_reg_entry_value): Likewise. (get_ax_pc): Likewise. (locexpr_read_variable): Likewise. (locexpr_read_variable_at_entry): Likewise. 
(locexpr_read_needs_frame): Likewise. (locexpr_describe_location): Likewise. (locexpr_tracepoint_var_ref): Likewise. (locexpr_generate_c_location): Likewise. (loclist_read_variable): Likewise. (loclist_read_variable_at_entry): Likewise. (loclist_describe_location): Likewise. (loclist_tracepoint_var_ref): Likewise. (loclist_generate_c_location): Likewise. * dwarf2read.c (line_header_hash_voidp): Likewise. (line_header_eq_voidp): Likewise. (dwarf2_has_info): Likewise. (dwarf2_get_section_info): Likewise. (locate_dwz_sections): Likewise. (hash_file_name_entry): Likewise. (eq_file_name_entry): Likewise. (delete_file_name_entry): Likewise. (dw2_setup): Likewise. (dw2_get_file_names_reader): Likewise. (dw2_find_pc_sect_compunit_symtab): Likewise. (hash_signatured_type): Likewise. (eq_signatured_type): Likewise. (add_signatured_type_cu_to_table): Likewise. (create_debug_types_hash_table): Likewise. (lookup_dwo_signatured_type): Likewise. (lookup_dwp_signatured_type): Likewise. (lookup_signatured_type): Likewise. (hash_type_unit_group): Likewise. (eq_type_unit_group): Likewise. (get_type_unit_group): Likewise. (process_psymtab_comp_unit_reader): Likewise. (sort_tu_by_abbrev_offset): Likewise. (process_skeletonless_type_unit): Likewise. (psymtabs_addrmap_cleanup): Likewise. (dwarf2_read_symtab): Likewise. (psymtab_to_symtab_1): Likewise. (die_hash): Likewise. (die_eq): Likewise. (load_full_comp_unit_reader): Likewise. (reset_die_in_process): Likewise. (free_cu_line_header): Likewise. (handle_DW_AT_stmt_list): Likewise. (hash_dwo_file): Likewise. (eq_dwo_file): Likewise. (hash_dwo_unit): Likewise. (eq_dwo_unit): Likewise. (create_dwo_cu_reader): Likewise. (create_dwo_unit_in_dwp_v1): Likewise. (create_dwo_unit_in_dwp_v2): Likewise. (lookup_dwo_unit_in_dwp): Likewise. (dwarf2_locate_dwo_sections): Likewise. (dwarf2_locate_common_dwp_sections): Likewise. (dwarf2_locate_v2_dwp_sections): Likewise. (hash_dwp_loaded_cutus): Likewise. (eq_dwp_loaded_cutus): Likewise. (lookup_dwo_cutu): Likewise. (abbrev_table_free_cleanup): Likewise. (dwarf2_free_abbrev_table): Likewise. (find_partial_die_in_comp_unit): Likewise. (free_line_header_voidp): Likewise. (follow_die_offset): Likewise. (follow_die_sig_1): Likewise. (free_heap_comp_unit): Likewise. (free_stack_comp_unit): Likewise. (dwarf2_free_objfile): Likewise. (per_cu_offset_and_type_hash): Likewise. (per_cu_offset_and_type_eq): Likewise. (get_die_type_at_offset): Likewise. (partial_die_hash): Likewise. (partial_die_eq): Likewise. (dwarf2_per_objfile_free): Likewise. (hash_strtab_entry): Likewise. (eq_strtab_entry): Likewise. (add_string): Likewise. (hash_symtab_entry): Likewise. (eq_symtab_entry): Likewise. (delete_symtab_entry): Likewise. (cleanup_mapped_symtab): Likewise. (add_indices_to_cpool): Likewise. (hash_psymtab_cu_index): Likewise. (eq_psymtab_cu_index): Likewise. (add_address_entry_worker): Likewise. (unlink_if_set): Likewise. (write_one_signatured_type): Likewise. (save_gdb_index_command): Likewise. * elfread.c (elf_symtab_read): Likewise. (elf_gnu_ifunc_cache_hash): Likewise. (elf_gnu_ifunc_cache_eq): Likewise. (elf_gnu_ifunc_record_cache): Likewise. (elf_gnu_ifunc_resolve_by_cache): Likewise. (elf_get_probes): Likewise. (probe_key_free): Likewise. * f-lang.c (builtin_f_type): Likewise. * frame-base.c (frame_base_append_sniffer): Likewise. (frame_base_set_default): Likewise. (frame_base_find_by_frame): Likewise. * frame-unwind.c (frame_unwind_prepend_unwinder): Likewise. (frame_unwind_append_unwinder): Likewise. 
(frame_unwind_find_by_frame): Likewise. * frame.c (frame_addr_hash): Likewise. (frame_addr_hash_eq): Likewise. (frame_stash_find): Likewise. (do_frame_register_read): Likewise. (unwind_to_current_frame): Likewise. (frame_cleanup_after_sniffer): Likewise. * frv-linux-tdep.c (frv_linux_sigtramp_frame_cache): Likewise. * frv-tdep.c (frv_frame_unwind_cache): Likewise. * ft32-tdep.c (ft32_frame_cache): Likewise. * gcore.c (do_bfd_delete_cleanup): Likewise. (gcore_create_callback): Likewise. * gdb_bfd.c (hash_bfd): Likewise. (eq_bfd): Likewise. (gdb_bfd_open): Likewise. (free_one_bfd_section): Likewise. (gdb_bfd_ref): Likewise. (gdb_bfd_unref): Likewise. (get_section_descriptor): Likewise. (gdb_bfd_map_section): Likewise. (gdb_bfd_crc): Likewise. (gdb_bfd_mark_parent): Likewise. (gdb_bfd_record_inclusion): Likewise. (gdb_bfd_requires_relocations): Likewise. (print_one_bfd): Likewise. * gdbtypes.c (type_pair_hash): Likewise. (type_pair_eq): Likewise. (builtin_type): Likewise. (objfile_type): Likewise. * gnu-v3-abi.c (vtable_ptrdiff_type): Likewise. (vtable_address_point_offset): Likewise. (gnuv3_get_vtable): Likewise. (hash_value_and_voffset): Likewise. (eq_value_and_voffset): Likewise. (compare_value_and_voffset): Likewise. (compute_vtable_size): Likewise. (gnuv3_get_typeid_type): Likewise. * go-lang.c (builtin_go_type): Likewise. * guile/scm-block.c (bkscm_hash_block_smob): Likewise. (bkscm_eq_block_smob): Likewise. (bkscm_objfile_block_map): Likewise. (bkscm_del_objfile_blocks): Likewise. * guile/scm-breakpoint.c (bpscm_build_bp_list): Likewise. * guile/scm-disasm.c (gdbscm_disasm_read_memory_worker): Likewise. (gdbscm_disasm_print_address): Likewise. * guile/scm-frame.c (frscm_hash_frame_smob): Likewise. (frscm_eq_frame_smob): Likewise. (frscm_inferior_frame_map): Likewise. (frscm_del_inferior_frames): Likewise. * guile/scm-gsmob.c (gdbscm_add_objfile_ref): Likewise. * guile/scm-objfile.c (ofscm_handle_objfile_deleted): Likewise. (ofscm_objfile_smob_from_objfile): Likewise. * guile/scm-ports.c (ioscm_write): Likewise. (ioscm_file_port_delete): Likewise. (ioscm_file_port_rewind): Likewise. (ioscm_file_port_put): Likewise. (ioscm_file_port_write): Likewise. * guile/scm-progspace.c (psscm_handle_pspace_deleted): Likewise. (psscm_pspace_smob_from_pspace): Likewise. * guile/scm-safe-call.c (scscm_recording_pre_unwind_handler): Likewise. (scscm_recording_unwind_handler): Likewise. (gdbscm_with_catch): Likewise. (scscm_call_0_body): Likewise. (scscm_call_1_body): Likewise. (scscm_call_2_body): Likewise. (scscm_call_3_body): Likewise. (scscm_call_4_body): Likewise. (scscm_apply_1_body): Likewise. (scscm_eval_scheme_string): Likewise. (gdbscm_safe_eval_string): Likewise. (scscm_source_scheme_script): Likewise. (gdbscm_safe_source_script): Likewise. * guile/scm-string.c (gdbscm_call_scm_to_stringn): Likewise. (gdbscm_call_scm_from_stringn): Likewise. * guile/scm-symbol.c (syscm_hash_symbol_smob): Likewise. (syscm_eq_symbol_smob): Likewise. (syscm_get_symbol_map): Likewise. (syscm_del_objfile_symbols): Likewise. * guile/scm-symtab.c (stscm_hash_symtab_smob): Likewise. (stscm_eq_symtab_smob): Likewise. (stscm_objfile_symtab_map): Likewise. (stscm_del_objfile_symtabs): Likewise. * guile/scm-type.c (tyscm_hash_type_smob): Likewise. (tyscm_eq_type_smob): Likewise. (tyscm_type_map): Likewise. (tyscm_copy_type_recursive): Likewise. (save_objfile_types): Likewise. * guile/scm-utils.c (extract_arg): Likewise. * h8300-tdep.c (h8300_frame_cache): Likewise. 
* hppa-linux-tdep.c (hppa_linux_sigtramp_frame_unwind_cache): Likewise. * hppa-tdep.c (compare_unwind_entries): Likewise. (find_unwind_entry): Likewise. (hppa_frame_cache): Likewise. (hppa_stub_frame_unwind_cache): Likewise. * hppanbsd-tdep.c (hppanbsd_supply_gregset): Likewise. * hppaobsd-tdep.c (hppaobsd_supply_gregset): Likewise. (hppaobsd_supply_fpregset): Likewise. * i386-cygwin-tdep.c (core_process_module_section): Likewise. * i386-linux-tdep.c (i386_linux_init_abi): Likewise. * i386-tdep.c (i386_frame_cache): Likewise. (i386_epilogue_frame_cache): Likewise. (i386_sigtramp_frame_cache): Likewise. (i386_supply_gregset): Likewise. (i386_collect_gregset): Likewise. (i386_gdbarch_init): Likewise. * i386obsd-tdep.c (i386obsd_aout_supply_regset): Likewise. (i386obsd_trapframe_cache): Likewise. * i387-tdep.c (i387_supply_fsave): Likewise. (i387_collect_fsave): Likewise. (i387_supply_fxsave): Likewise. (i387_collect_fxsave): Likewise. (i387_supply_xsave): Likewise. (i387_collect_xsave): Likewise. * ia64-tdep.c (ia64_frame_cache): Likewise. (ia64_sigtramp_frame_cache): Likewise. * infcmd.c (attach_command_continuation): Likewise. (attach_command_continuation_free_args): Likewise. * inferior.c (restore_inferior): Likewise. (delete_thread_of_inferior): Likewise. * inflow.c (inflow_inferior_data_cleanup): Likewise. (get_inflow_inferior_data): Likewise. (inflow_inferior_exit): Likewise. * infrun.c (displaced_step_clear_cleanup): Likewise. (restore_current_uiout_cleanup): Likewise. (release_stop_context_cleanup): Likewise. (do_restore_infcall_suspend_state_cleanup): Likewise. (do_restore_infcall_control_state_cleanup): Likewise. (restore_inferior_ptid): Likewise. * inline-frame.c (block_starting_point_at): Likewise. * iq2000-tdep.c (iq2000_frame_cache): Likewise. * jit.c (get_jit_objfile_data): Likewise. (get_jit_program_space_data): Likewise. (jit_object_close_impl): Likewise. (jit_find_objf_with_entry_addr): Likewise. (jit_breakpoint_deleted): Likewise. (jit_unwind_reg_set_impl): Likewise. (jit_unwind_reg_get_impl): Likewise. (jit_dealloc_cache): Likewise. (jit_frame_sniffer): Likewise. (jit_frame_prev_register): Likewise. (jit_prepend_unwinder): Likewise. (jit_inferior_exit_hook): Likewise. (free_objfile_data): Likewise. * jv-lang.c (jv_per_objfile_free): Likewise. (get_dynamics_objfile): Likewise. (get_java_class_symtab): Likewise. (builtin_java_type): Likewise. * language.c (language_string_char_type): Likewise. (language_bool_type): Likewise. (language_lookup_primitive_type): Likewise. (language_lookup_primitive_type_as_symbol): Likewise. * linespec.c (hash_address_entry): Likewise. (eq_address_entry): Likewise. (iterate_inline_only): Likewise. (iterate_name_matcher): Likewise. (decode_line_2_compare_items): Likewise. (collect_one_symbol): Likewise. (compare_symbols): Likewise. (compare_msymbols): Likewise. (add_symtabs_to_list): Likewise. (collect_symbols): Likewise. (compare_msyms): Likewise. (add_minsym): Likewise. (cleanup_linespec_result): Likewise. * linux-fork.c (inferior_call_waitpid_cleanup): Likewise. * linux-nat.c (delete_lwp_cleanup): Likewise. (count_events_callback): Likewise. (select_event_lwp_callback): Likewise. (resume_stopped_resumed_lwps): Likewise. * linux-tdep.c (get_linux_gdbarch_data): Likewise. (invalidate_linux_cache_inf): Likewise. (get_linux_inferior_data): Likewise. (linux_find_memory_regions_thunk): Likewise. (linux_make_mappings_callback): Likewise. (linux_corefile_thread_callback): Likewise. (find_mapping_size): Likewise. 
* linux-thread-db.c (find_new_threads_callback): Likewise. * lm32-tdep.c (lm32_frame_cache): Likewise. * m2-lang.c (builtin_m2_type): Likewise. * m32c-tdep.c (m32c_analyze_frame_prologue): Likewise. * m32r-linux-tdep.c (m32r_linux_sigtramp_frame_cache): Likewise. (m32r_linux_supply_gregset): Likewise. (m32r_linux_collect_gregset): Likewise. * m32r-tdep.c (m32r_frame_unwind_cache): Likewise. * m68hc11-tdep.c (m68hc11_frame_unwind_cache): Likewise. * m68k-tdep.c (m68k_frame_cache): Likewise. * m68kbsd-tdep.c (m68kbsd_supply_fpregset): Likewise. (m68kbsd_supply_gregset): Likewise. * m68klinux-tdep.c (m68k_linux_sigtramp_frame_cache): Likewise. * m88k-tdep.c (m88k_frame_cache): Likewise. (m88k_supply_gregset): Likewise. gdb/gdbserver/ChangeLog: * dll.c (match_dll): Add cast(s). (unloaded_dll): Likewise. * linux-low.c (second_thread_of_pid_p): Likewise. (delete_lwp_callback): Likewise. (count_events_callback): Likewise. (select_event_lwp_callback): Likewise. (linux_set_resume_request): Likewise. * server.c (accumulate_file_name_length): Likewise. (emit_dll_description): Likewise. (handle_qxfer_threads_worker): Likewise. (visit_actioned_threads): Likewise. * thread-db.c (any_thread_of): Likewise. * tracepoint.c (same_process_p): Likewise. (match_blocktype): Likewise. (build_traceframe_info_xml): Likewise. gdb/testsuite/ChangeLog: * gdb.gdb/selftest.exp (do_steps_and_nexts): Adjust expected source line.
2015-09-26 02:08:07 +08:00
  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  /* If present, the optional "size" attribute has already been parsed
     into a ULONGEST by its attribute handler.  */
  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}
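
/* Illustration only (a sketch, not taken from the original source): the
   btrace-conf document reported by a remote stub looks roughly like

	<btrace-conf version="1.0">
	  <bts size="65536"/>
	</btrace-conf>

   where the size value is just an example.  The "bts" start handler
   above records the format and the optional "size" attribute in the
   btrace_config passed in as USER_DATA; the "pt" handler below does the
   same for the Intel Processor Trace configuration.  The element and
   attribute set is defined by features/btrace-conf.dtd.  */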

/* Parse a btrace-conf "pt" xml record.  */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;
* linux-thread-db.c (find_new_threads_callback): Likewise. * lm32-tdep.c (lm32_frame_cache): Likewise. * m2-lang.c (builtin_m2_type): Likewise. * m32c-tdep.c (m32c_analyze_frame_prologue): Likewise. * m32r-linux-tdep.c (m32r_linux_sigtramp_frame_cache): Likewise. (m32r_linux_supply_gregset): Likewise. (m32r_linux_collect_gregset): Likewise. * m32r-tdep.c (m32r_frame_unwind_cache): Likewise. * m68hc11-tdep.c (m68hc11_frame_unwind_cache): Likewise. * m68k-tdep.c (m68k_frame_cache): Likewise. * m68kbsd-tdep.c (m68kbsd_supply_fpregset): Likewise. (m68kbsd_supply_gregset): Likewise. * m68klinux-tdep.c (m68k_linux_sigtramp_frame_cache): Likewise. * m88k-tdep.c (m88k_frame_cache): Likewise. (m88k_supply_gregset): Likewise. gdb/gdbserver/ChangeLog: * dll.c (match_dll): Add cast(s). (unloaded_dll): Likewise. * linux-low.c (second_thread_of_pid_p): Likewise. (delete_lwp_callback): Likewise. (count_events_callback): Likewise. (select_event_lwp_callback): Likewise. (linux_set_resume_request): Likewise. * server.c (accumulate_file_name_length): Likewise. (emit_dll_description): Likewise. (handle_qxfer_threads_worker): Likewise. (visit_actioned_threads): Likewise. * thread-db.c (any_thread_of): Likewise. * tracepoint.c (same_process_p): Likewise. (match_blocktype): Likewise. (build_traceframe_info_xml): Likewise. gdb/testsuite/ChangeLog: * gdb.gdb/selftest.exp (do_steps_and_nexts): Adjust expected source line.
2015-09-26 02:08:07 +08:00
  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
}
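
/* The XML attributes of the <pt> child element of a branch trace
   configuration document.  Only the optional "size" attribute is
   recognized; it is parsed as an unsigned integer.  */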
static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};
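
/* The XML attributes of the <bts> child element.  As for <pt>, only the
   optional "size" attribute is recognized.  */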
static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};
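
/* The child elements of a <btrace-conf> document.  Both <bts> and <pt> are
   optional; each selects the corresponding parser callback above.  */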
static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
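
/* The XML attributes of the <btrace-conf> root element.  The "version"
   attribute is mandatory, but its value is not checked by this parser.  */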
static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};
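
/* The top-level element table for branch trace configuration documents,
   handed to gdb_xml_parse_quick by parse_xml_btrace_conf below.  */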
static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
#endif /* defined (HAVE_LIBEXPAT) */
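
/* For illustration, a branch trace configuration document has the general
   shape

     <btrace-conf version="...">
       <bts size="..."/>
       <pt size="..."/>
     </btrace-conf>

   where both child elements are optional; the authoritative grammar is
   btrace-conf.dtd.  */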
/* See btrace.h. */
void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
/* See btrace.h. */
const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->insn_index;
  bfun = &it->btinfo->functions[it->call_index];
  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;
  /* The index is within the bounds of this function's instruction vector.  */
  end = bfun->insn.size ();
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return &bfun->insn[index];
}
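
/* Usage sketch (illustrative; the iterator interface is declared in
   btrace.h): callers such as record-btrace.c walk the recorded instructions
   with btrace_insn_begin and btrace_insn_next and use a NULL result from
   btrace_insn_get to recognize a gap:

     btrace_insn_begin (&it, btinfo);
     do
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn == NULL)
           handle_gap (btrace_insn_get_error (&it));
         else
           handle_insn (insn->pc);
       }
     while (btrace_insn_next (&it, 1) != 0);

   Here HANDLE_GAP and HANDLE_INSN stand for caller-specific processing.  */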
/* See btrace.h. */
int
btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
return it->btinfo->functions[it->call_index].errcode;
}
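/* Usage sketch (illustrative; IT is assumed to be a valid iterator into a
   fetched trace): callers typically pair btrace_insn_get with
   btrace_insn_get_error to tell a real instruction apart from a gap.  A
   NULL instruction marks a gap and the error code is format-specific
   (BTS or Intel PT).

     const struct btrace_insn *insn = btrace_insn_get (it);

     if (insn == NULL)
       warning (_("decode error (%d)"), btrace_insn_get_error (it));
*/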
/* See btrace.h. */
unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
}
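/* Worked example (assumed values): if the iterator's function segment
   starts at global instruction number 10 (insn_offset == 10) and the
   iterator points at that segment's fourth instruction (insn_index == 3),
   btrace_insn_number returns 13.  */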
/* See btrace.h. */
void
btrace_insn_begin (struct btrace_insn_iterator *it,
const struct btrace_thread_info *btinfo)
{
if (btinfo->functions.empty ())
error (_("No trace."));
it->btinfo = btinfo;
it->call_index = 0;
it->insn_index = 0;
}
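/* Usage sketch (illustrative; TP is assumed to be a thread with branch
   tracing enabled): position an iterator at the first recorded
   instruction after fetching the trace.

     struct btrace_thread_info *btinfo = &tp->btrace;
     struct btrace_insn_iterator begin;

     btrace_fetch (tp);
     btrace_insn_begin (&begin, btinfo);
*/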
/* See btrace.h. */
void
btrace_insn_end (struct btrace_insn_iterator *it,
const struct btrace_thread_info *btinfo)
{
const struct btrace_function *bfun;
unsigned int length;
if (btinfo->functions.empty ())
error (_("No trace."));
bfun = &btinfo->functions.back ();
length = bfun->insn.size ();
/* The last function segment is either a gap or contains the current
   instruction, which is one past the end of the execution trace; ignore
   that instruction.  */
if (length > 0)
length -= 1;
it->btinfo = btinfo;
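/* BFUN->number is 1-based; the functions vector is 0-based.  */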
it->call_index = bfun->number - 1;
it->insn_index = length;
}
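/* Usage sketch (illustrative; BTINFO is assumed to hold a fetched trace):
   walk the complete instruction history, counting instructions and gaps.
   A gap yields NULL from btrace_insn_get but still advances the iterator
   by one step.

     struct btrace_insn_iterator it, end;
     unsigned int insns = 0, gaps = 0;

     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
         if (btrace_insn_get (&it) == NULL)
           gaps += 1;
         else
           insns += 1;

         if (btrace_insn_next (&it, 1) == 0)
           break;
       }
*/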
/* See btrace.h. */
unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
const struct btrace_function *bfun;
unsigned int index, steps;
bfun = &it->btinfo->functions[it->call_index];
steps = 0;
index = it->insn_index;
while (stride != 0)
{
unsigned int end, space, adv;
end = bfun->insn.size ();
/* An empty function segment represents a gap in the trace. We count
it as one instruction. */
if (end == 0)
{
const struct btrace_function *next;
next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
if (next == NULL)
break;
stride -= 1;
steps += 1;
bfun = next;
index = 0;
continue;
}
gdb_assert (0 < end);
gdb_assert (index < end);
/* Compute the number of instructions remaining in this segment. */
space = end - index;
/* Advance the iterator as far as possible within this segment. */
adv = std::min (space, stride);
stride -= adv;
index += adv;
steps += adv;
/* Move to the next function if we're at the end of this one. */
if (index == end)
{
const struct btrace_function *next;
next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
if (next == NULL)
{
/* We stepped past the last function.
Let's adjust the index to point to the last instruction in
the previous function. */
index -= 1;
steps -= 1;
break;
}
/* We now point to the first instruction in the new function. */
bfun = next;
index = 0;
}
/* We did make progress. */
gdb_assert (adv > 0);
}
/* Update the iterator. */
it->call_index = bfun->number - 1;
it->insn_index = index;
return steps;
}
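/* Illustrative sketch (an editorial addition, not part of GDB): a typical
   forward walk over the recorded instruction history using the iterator
   functions above.  BTINFO is assumed to refer to branch trace information
   that has already been fetched; PROCESS is a hypothetical callback.  */

static void ATTRIBUTE_UNUSED
btrace_insn_walk_example (const struct btrace_thread_info *btinfo,
			  void (*process) (CORE_ADDR pc))
{
  struct btrace_insn_iterator it;

  /* Start at the first instruction and step forward one instruction at a
     time; btrace_insn_next returns the number of instructions actually
     stepped and returns zero once the end of the trace is reached.  */
  btrace_insn_begin (&it, btinfo);
  do
    {
      const struct btrace_insn *insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn != NULL)
	process (insn->pc);
    }
  while (btrace_insn_next (&it, 1) != 0);
}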
/* See btrace.h. */
unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
const struct btrace_function *bfun;
unsigned int index, steps;
bfun = &it->btinfo->functions[it->call_index];
steps = 0;
index = it->insn_index;
while (stride != 0)
{
unsigned int adv;
/* Move to the previous function if we're at the start of this one. */
if (index == 0)
{
const struct btrace_function *prev;
prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
if (prev == NULL)
break;
/* We point to one after the last instruction in the new function. */
bfun = prev;
index = bfun->insn.size ();
/* An empty function segment represents a gap in the trace. We count
it as one instruction. */
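/* For example, if the previous segment was created for a decode error it
   contains no instructions; we charge one step for the gap and continue
   with the segment before it.  */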
if (index == 0)
{
stride -= 1;
steps += 1;
continue;
}
}
/* Advance the iterator as far as possible within this segment. */
adv = std::min (index, stride);
stride -= adv;
index -= adv;
steps += adv;
/* We did make progress. */
gdb_assert (adv > 0);
}
/* Update the iterator. */
it->call_index = bfun->number - 1;
it->insn_index = index;
return steps;
}
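/* Illustrative sketch (an editorial addition, not part of GDB): stepping
   backwards from the end of the trace in chunks.  BTINFO is assumed to
   refer to already-fetched branch trace information; CHUNK is assumed to
   be non-zero.  */

static unsigned int ATTRIBUTE_UNUSED
btrace_insn_rewind_example (const struct btrace_thread_info *btinfo,
			    unsigned int chunk)
{
  struct btrace_insn_iterator it;
  unsigned int total, steps;

  btrace_insn_end (&it, btinfo);

  /* btrace_insn_prev returns the number of instructions actually stepped,
     which may be less than CHUNK near the beginning of the trace; a gap
     segment is charged as a single step.  */
  total = 0;
  do
    {
      steps = btrace_insn_prev (&it, chunk);
      total += steps;
    }
  while (steps == chunk);

  return total;
}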
/* See btrace.h. */
int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
const struct btrace_insn_iterator *rhs)
{
gdb_assert (lhs->btinfo == rhs->btinfo);
if (lhs->call_index != rhs->call_index)
return lhs->call_index - rhs->call_index;
return lhs->insn_index - rhs->insn_index;
}
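/* Illustrative sketch, not part of the GDB sources: the result of
   btrace_insn_cmp can be used in the usual strcmp-like way, e.g.

     if (btrace_insn_cmp (&begin, &end) <= 0)
       ...BEGIN does not come after END in control-flow order...

   where BEGIN and END are hypothetical iterators into the same thread's
   trace.  A negative, zero, or positive result means LHS orders before,
   equal to, or after RHS, respectively.  */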
/* See btrace.h. */
int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
const struct btrace_thread_info *btinfo,
unsigned int number)
{
const struct btrace_function *bfun;
unsigned int upper, lower;
if (btinfo->functions.empty ())
return 0;
lower = 0;
bfun = &btinfo->functions[lower];
if (number < bfun->insn_offset)
return 0;
upper = btinfo->functions.size () - 1;
bfun = &btinfo->functions[upper];
if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
return 0;
/* We assume that there are no holes in the numbering. */
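/* Binary search for the function segment whose instruction range contains
   NUMBER.  Segment BFUN covers the half-open range
   [BFUN->insn_offset, BFUN->insn_offset + ftrace_call_num_insn (BFUN)).  */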
for (;;)
{
const unsigned int average = lower + (upper - lower) / 2;
bfun = &btinfo->functions[average];
if (number < bfun->insn_offset)
{
upper = average - 1;
continue;
}
if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
{
lower = average + 1;
continue;
}
break;
}
it->btinfo = btinfo;
it->call_index = bfun->number - 1;
it->insn_index = number - bfun->insn_offset;
return 1;
}
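/* Illustrative sketch, not part of the GDB sources: a caller that wants to
   inspect instruction NUMBER of a thread's trace could combine the lookup
   above with btrace_insn_get:

     struct btrace_insn_iterator it;

     if (btrace_find_insn_by_number (&it, btinfo, number) != 0)
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);
         ...
       }

   The lookup returns non-zero on success and zero if NUMBER lies outside
   the recorded instruction range.  */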
/* Returns true if the recording ends with a function segment that
contains only a single (i.e. the current) instruction. */
static bool
btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
{
const btrace_function *bfun;
if (btinfo->functions.empty ())
return false;
bfun = &btinfo->functions.back ();
if (bfun->errcode != 0)
return false;
return ftrace_call_num_insn (bfun) == 1;
}
/* See btrace.h. */
const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
if (it->index >= it->btinfo->functions.size ())
return NULL;
return &it->btinfo->functions[it->index];
}
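/* Note that btrace_call_get yields NULL for an end iterator, i.e. when
   IT->INDEX equals the number of function segments.  Callers can use this
   to detect that they have iterated past the last call segment.  */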
/* See btrace.h. */
unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
const unsigned int length = it->btinfo->functions.size ();
/* If the last function segment contains only a single instruction (i.e. the
current instruction), skip it. */
if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
return length;
return it->index + 1;
}
/* See btrace.h. */
void
btrace_call_begin (struct btrace_call_iterator *it,
const struct btrace_thread_info *btinfo)
{
if (btinfo->functions.empty ())
error (_("No trace."));
it->btinfo = btinfo;
it->index = 0;
}
/* See btrace.h. */
void
btrace_call_end (struct btrace_call_iterator *it,
const struct btrace_thread_info *btinfo)
{
if (btinfo->functions.empty ())
error (_("No trace."));
it->btinfo = btinfo;
it->index = btinfo->functions.size ();
}
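/* Illustrative sketch, not part of the original file: a typical way to walk
   the call history in control-flow order using the iterator functions above,
   assuming a BTINFO that has already been populated via btrace_fetch and the
   iterator declarations from btrace.h.

     struct btrace_call_iterator it, end;

     btrace_call_begin (&it, btinfo);
     btrace_call_end (&end, btinfo);

     while (btrace_call_cmp (&it, &end) < 0)
       {
         const struct btrace_function *bfun = btrace_call_get (&it);

         handle_call_segment (bfun);  // hypothetical consumer

         if (btrace_call_next (&it, 1) == 0)
           break;
       }

   btrace_call_cmp, btrace_call_get and btrace_call_next are declared in
   btrace.h; handle_call_segment is a made-up stand-in for whatever the
   caller does with each function segment.  */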
/* See btrace.h. */
unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
const unsigned int length = it->btinfo->functions.size ();
if (it->index + stride < length - 1)
/* Default case: Simply advance the iterator. */
it->index += stride;
else if (it->index + stride == length - 1)
{
/* We land exactly at the last function segment. If it contains only one
instruction (i.e. the current instruction) it is not actually part of
the trace. */
if (btrace_ends_with_single_insn (it->btinfo))
it->index = length;
else
it->index = length - 1;
}
else
{
/* We land past the last function segment and have to adjust the stride.
If the last function segment contains only one instruction (i.e. the
current instruction) it is not actually part of the trace. */
if (btrace_ends_with_single_insn (it->btinfo))
stride = length - it->index - 1;
else
stride = length - it->index;
it->index = length;
}
return stride;
}
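/* Illustrative note, not part of the original file: btrace_call_next returns
   the number of function segments the iterator actually advanced, which may
   be smaller than the requested STRIDE when the iterator runs into the end
   of the trace.  A hedged usage sketch, where IT and STRIDE come from the
   caller:

     unsigned int steps = btrace_call_next (&it, stride);

     if (steps < stride)
       stop_iteration ();  // hypothetical; the end of the trace was reached
*/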
/* See btrace.h. */
unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
const unsigned int length = it->btinfo->functions.size ();
int steps = 0;
gdb_assert (it->index <= length);
if (stride == 0 || it->index == 0)
return 0;
/* If we are at the end, the first step is a special case. If the last
function segment contains only one instruction (i.e. the current
instruction) it is not actually part of the trace. To be able to step
over this instruction, we need at least one more function segment. */
if ((it->index == length) && (length > 1))
    {
      if (btrace_ends_with_single_insn (it->btinfo))
        it->index = length - 2;
      else
        it->index = length - 1;

      steps = 1;
      stride -= 1;
    }

  stride = std::min (stride, it->index);

  it->index -= stride;
  return steps + stride;
}
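/* Illustrative note (not from the original sources): when the iterator
   starts one past the last function segment, the first backward step is
   special.  If the trace ends in a lone single-instruction segment (the
   current instruction), that segment is skipped and the iterator lands on
   the segment before it; otherwise it lands on the last segment.  Either
   way, one unit of the requested stride is consumed before the remaining
   steps are clamped to the distance from the beginning of the trace.  For
   example, with five function segments and no trailing single-instruction
   segment, a stride of 10 from the end position yields five steps and
   leaves the iterator at the first segment.  */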
/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);

  return (int) (lhs->index - rhs->index);
}
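/* Usage sketch for btrace_call_cmp (illustrative only; BTINFO and the
   btrace_call_begin, btrace_call_end, btrace_call_get and btrace_call_next
   helpers declared in btrace.h are assumed):

     struct btrace_call_iterator begin, end;

     btrace_call_begin (&begin, btinfo);
     btrace_call_end (&end, btinfo);

     while (btrace_call_cmp (&begin, &end) < 0)
       {
         ... process btrace_call_get (&begin) ...
         btrace_call_next (&begin, 1);
       }

   The result is negative, zero, or positive in the manner of strcmp; both
   iterators must refer to the same branch trace or the assertion above
   fires.  */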
/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const unsigned int length = btinfo->functions.size ();

  if ((number == 0) || (number > length))
    return 0;

  it->btinfo = btinfo;
  it->index = number - 1;
  return 1;
}
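/* Usage sketch for btrace_find_call_by_number (illustrative only; BTINFO
   is assumed to be a thread's btrace_thread_info):

     struct btrace_call_iterator it;

     if (btrace_find_call_by_number (&it, btinfo, 1) != 0)
       ... the iterator now refers to the first function segment ...

   Function segment numbers are 1-based.  The call returns zero and leaves
   the iterator untouched if NUMBER is zero or exceeds the number of
   recorded function segments.  */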
/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
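/* Usage sketch for btrace_set_insn_history (illustrative only): the
   record-btrace target presumably remembers the range printed by the last
   "record instruction-history" command along these lines:

     btrace_set_insn_history (btinfo, &begin, &end);

   where BEGIN and END are btrace_insn_iterators delimiting that range.
   The history object is allocated lazily on first use; later calls only
   update the cached iterators.  */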
/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
btinfo->call_history = XCNEW (struct btrace_call_history);
btinfo->call_history->begin = *begin;
btinfo->call_history->end = *end;
}
/* See btrace.h. */
int
btrace_is_replaying (struct thread_info *tp)
{
return tp->btrace.replay != NULL;
}
/* See btrace.h. */
int
btrace_is_empty (struct thread_info *tp)
{
struct btrace_insn_iterator begin, end;
struct btrace_thread_info *btinfo;
btinfo = &tp->btrace;
if (btinfo->functions.empty ())
return 1;
btrace_insn_begin (&begin, btinfo);
btrace_insn_end (&end, btinfo);
return btrace_insn_cmp (&begin, &end) == 0;
}
/* Forward the cleanup request. */
static void
do_btrace_data_cleanup (void *arg)
{
btrace_data_fini ((struct btrace_data *) arg);
}
/* See btrace.h. */
struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
return make_cleanup (do_btrace_data_cleanup, data);
}
#if defined (HAVE_LIBIPT)
/* Print a single packet. */
static void
pt_print_packet (const struct pt_packet *packet)
{
switch (packet->type)
{
default:
printf_unfiltered (("[??: %x]"), packet->type);
break;
case ppt_psb:
printf_unfiltered (("psb"));
break;
case ppt_psbend:
printf_unfiltered (("psbend"));
break;
case ppt_pad:
printf_unfiltered (("pad"));
break;
case ppt_tip:
printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
packet->payload.ip.ipc,
packet->payload.ip.ip);
break;
case ppt_tip_pge:
printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
packet->payload.ip.ipc,
packet->payload.ip.ip);
break;
case ppt_tip_pgd:
printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
packet->payload.ip.ipc,
packet->payload.ip.ip);
break;
case ppt_fup:
printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
packet->payload.ip.ipc,
packet->payload.ip.ip);
break;
case ppt_tnt_8:
printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
packet->payload.tnt.bit_size,
packet->payload.tnt.payload);
break;
case ppt_tnt_64:
printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
packet->payload.tnt.bit_size,
packet->payload.tnt.payload);
break;
case ppt_pip:
printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
packet->payload.pip.nr ? (" nr") : (""));
break;
case ppt_tsc:
printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
break;
case ppt_cbr:
printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
break;
case ppt_mode:
switch (packet->payload.mode.leaf)
{
default:
printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
break;
case pt_mol_exec:
printf_unfiltered (("mode.exec%s%s"),
packet->payload.mode.bits.exec.csl
? (" cs.l") : (""),
packet->payload.mode.bits.exec.csd
? (" cs.d") : (""));
break;
case pt_mol_tsx:
printf_unfiltered (("mode.tsx%s%s"),
packet->payload.mode.bits.tsx.intx
? (" intx") : (""),
packet->payload.mode.bits.tsx.abrt
? (" abrt") : (""));
break;
}
break;
case ppt_ovf:
printf_unfiltered (("ovf"));
break;
case ppt_stop:
printf_unfiltered (("stop"));
break;
case ppt_vmcs:
printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
break;
case ppt_tma:
printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
packet->payload.tma.fc);
break;
case ppt_mtc:
printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
break;
case ppt_cyc:
printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
break;
case ppt_mnt:
printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
break;
}
}
/* Decode packets into MAINT using DECODER. */
static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
struct pt_packet_decoder *decoder)
{
int errcode;
for (;;)
{
struct btrace_pt_packet packet;
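/* Synchronize onto the next PSB packet; packet decoding (re-)starts from there.  */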
errcode = pt_pkt_sync_forward (decoder);
if (errcode < 0)
break;
for (;;)
{
pt_pkt_get_offset (decoder, &packet.offset);
errcode = pt_pkt_next (decoder, &packet.packet,
sizeof (packet.packet));
if (errcode < 0)
break;
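/* Store the packet unless it is a PAD packet that we were asked to skip.  */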
if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
{
packet.errcode = pt_errcode (errcode);
VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
&packet);
}
}
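/* The inner loop stopped on an error.  End of stream terminates the decode; any other error is recorded below before we try to re-synchronize.  */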
if (errcode == -pte_eos)
break;
packet.errcode = pt_errcode (errcode);
VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
&packet);
warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
packet.offset, pt_errstr (packet.errcode));
}
if (errcode != -pte_eos)
warning (_("Failed to synchronize onto the Intel Processor Trace "
"stream: %s."), pt_errstr (pt_errcode (errcode)));
}
/* Update the packet history in BTINFO. */
static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
struct pt_packet_decoder *decoder;
struct btrace_data_pt *pt;
struct pt_config config;
int errcode;
pt = &btinfo->data.variant.pt;
/* Nothing to do if there is no trace. */
if (pt->size == 0)
return;
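/* Configure the packet decoder for the trace buffer and the cpu on which the trace was recorded so the appropriate errata workarounds are applied.  */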
memset (&config, 0, sizeof (config));
config.size = sizeof (config);
config.begin = pt->data;
config.end = pt->data + pt->size;
config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
config.cpu.family = pt->config.cpu.family;
config.cpu.model = pt->config.cpu.model;
config.cpu.stepping = pt->config.cpu.stepping;
errcode = pt_cpu_errata (&config.errata, &config.cpu);
if (errcode < 0)
error (_("Failed to configure the Intel Processor Trace decoder: %s."),
pt_errstr (pt_errcode (errcode)));
decoder = pt_pkt_alloc_decoder (&config);
if (decoder == NULL)
error (_("Failed to allocate the Intel Processor Trace decoder."));
TRY
{
btrace_maint_decode_pt (&btinfo->maint, decoder);
}
CATCH (except, RETURN_MASK_ALL)
{
pt_pkt_free_decoder (decoder);
if (except.reason < 0)
throw_exception (except);
}
END_CATCH
pt_pkt_free_decoder (decoder);
}
#endif /* defined (HAVE_LIBIPT) */
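/* A note for maintainers on the flow above: CONFIG.BEGIN and CONFIG.END
   point directly into the raw trace buffer (PT->DATA), so the packet
   decoder works on the fetched trace in place, without copying it.  The
   actual packet walk happens in btrace_maint_decode_pt, defined earlier
   in this file; as a rough sketch (assuming the usual libipt packet API),
   it boils down to:

     errcode = pt_pkt_sync_forward (decoder);
     while (errcode >= 0)
       {
         struct pt_packet packet;

         errcode = pt_pkt_next (decoder, &packet, sizeof (packet));

         ... record the packet, its offset, and ERRCODE for the
             packet history ...
       }

   Decode errors are recorded per packet rather than discarding the trace,
   so partially corrupted traces can still be inspected.  */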
/* Update the packet maintenance information for BTINFO and store the
low and high bounds into BEGIN and END, respectively.
Store the current iterator state into FROM and TO. */
static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
unsigned int *begin, unsigned int *end,
unsigned int *from, unsigned int *to)
{
switch (btinfo->data.format)
{
default:
*begin = 0;
*end = 0;
*from = 0;
*to = 0;
break;
case BTRACE_FORMAT_BTS:
/* Nothing to do - we operate directly on BTINFO->DATA. */
*begin = 0;
*end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
*from = btinfo->maint.variant.bts.packet_history.begin;
*to = btinfo->maint.variant.bts.packet_history.end;
break;
#if defined (HAVE_LIBIPT)
case BTRACE_FORMAT_PT:
if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
btrace_maint_update_pt_packets (btinfo);
*begin = 0;
*end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
*from = btinfo->maint.variant.pt.packet_history.begin;
*to = btinfo->maint.variant.pt.packet_history.end;
break;
#endif /* defined (HAVE_LIBIPT) */
}
}
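/* For example (numbers are illustrative): with 100 BTS blocks recorded and
   nothing printed yet, the function above yields BEGIN = 0, END = 100 and
   FROM = TO = 0; after the first ten packets have been printed the stored
   window becomes [0, 10), so a subsequent "+" request continues at packet
   10.  For an empty or unsupported trace format all four values are zero,
   which the packet-history command below reports as "No trace.".  */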
/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
update the current iterator position. */
static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
unsigned int begin, unsigned int end)
{
switch (btinfo->data.format)
{
default:
break;
case BTRACE_FORMAT_BTS:
{
VEC (btrace_block_s) *blocks;
unsigned int blk;
blocks = btinfo->data.variant.bts.blocks;
for (blk = begin; blk < end; ++blk)
{
const btrace_block_s *block;
block = VEC_index (btrace_block_s, blocks, blk);
printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
core_addr_to_string_nz (block->begin),
core_addr_to_string_nz (block->end));
}
btinfo->maint.variant.bts.packet_history.begin = begin;
btinfo->maint.variant.bts.packet_history.end = end;
}
break;
#if defined (HAVE_LIBIPT)
case BTRACE_FORMAT_PT:
{
VEC (btrace_pt_packet_s) *packets;
unsigned int pkt;
packets = btinfo->maint.variant.pt.packets;
for (pkt = begin; pkt < end; ++pkt)
{
const struct btrace_pt_packet *packet;
packet = VEC_index (btrace_pt_packet_s, packets, pkt);
printf_unfiltered ("%u\t", pkt);
printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
if (packet->errcode == pte_ok)
pt_print_packet (&packet->packet);
else
printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
printf_unfiltered ("\n");
}
btinfo->maint.variant.pt.packet_history.begin = begin;
btinfo->maint.variant.pt.packet_history.end = end;
}
break;
#endif /* defined (HAVE_LIBIPT) */
}
}
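/* For reference, the lines printed above look like

     <number>\tbegin: <address>, end: <address>

   for BTS blocks, and

     <number>\t0x<offset>\t<packet as printed by pt_print_packet>

   for Intel Processor Trace packets, with "[error: ...]" substituted for
   packets that could not be decoded.  */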
/* Read a number from an argument string. */
static unsigned int
get_uint (char **arg)
{
char *begin, *end, *pos;
unsigned long number;
begin = *arg;
pos = skip_spaces (begin);
if (!isdigit (*pos))
error (_("Expected positive number, got: %s."), pos);
number = strtoul (pos, &end, 10);
if (number > UINT_MAX)
error (_("Number too big."));
*arg += (end - begin);
return (unsigned int) number;
}
/* Read a context size from an argument string. */
static int
get_context_size (char **arg)
{
char *pos;
pos = skip_spaces (*arg);
if (!isdigit (*pos))
error (_("Expected positive number, got: %s."), pos);
return strtol (pos, arg, 10);
}
/* Complain about junk at the end of an argument string. */
static void
no_chunk (char *arg)
{
if (*arg != 0)
error (_("Junk after argument: %s."), arg);
}
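/* The three helpers above parse the packet-history arguments handled
   below.  For instance, given "12,+5", get_uint consumes "12", the caller
   skips the ',' and '+', get_context_size consumes "5", and no_chunk
   checks that nothing is left over.  */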
/* The "maintenance btrace packet-history" command. */
static void
maint_btrace_packet_history_cmd (char *arg, int from_tty)
{
struct btrace_thread_info *btinfo;
struct thread_info *tp;
unsigned int size, begin, end, from, to;
tp = find_thread_ptid (inferior_ptid);
if (tp == NULL)
error (_("No thread."));
/* Default to a window of ten packets.  */
size = 10;
btinfo = &tp->btrace;
btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
if (begin == end)
{
printf_unfiltered (_("No trace.\n"));
return;
}
if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
{
from = to;
if (end - from < size)
size = end - from;
to = from + size;
}
else if (strcmp (arg, "-") == 0)
{
to = from;
if (to - begin < size)
size = to - begin;
from = to - size;
}
else
{
from = get_uint (&arg);
if (end <= from)
error (_("'%u' is out of range."), from);
arg = skip_spaces (arg);
if (*arg == ',')
{
arg = skip_spaces (++arg);
if (*arg == '+')
{
arg += 1;
size = get_context_size (&arg);
no_chunk (arg);
if (end - from < size)
size = end - from;
to = from + size;
}
else if (*arg == '-')
{
arg += 1;
size = get_context_size (&arg);
no_chunk (arg);
/* Include the packet given as first argument. */
from += 1;
to = from;
if (to - begin < size)
size = to - begin;
from = to - size;
}
else
{
to = get_uint (&arg);
/* Include the packet at the second argument and silently
truncate the range. */
if (to < end)
to += 1;
else
to = end;
no_chunk (arg);
}
}
else
{
no_chunk (arg);
if (end - from < size)
size = end - from;
to = from + size;
}
dont_repeat ();
}
btrace_maint_print_packets (btinfo, from, to);
}
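/* Summary of the argument forms accepted above (they mirror the help text
   installed in _initialize_btrace below):

     (none), "+"  print the next ten packets
     "-"          print the ten packets before the last printed window
     "N"          print ten packets starting at packet N
     "N,M"        print packets N through M, inclusive
     "N,+K"       print K packets starting at packet N
     "N,-K"       print K packets ending at packet N

   An out-of-range N raises an error; M and K are silently clamped to the
   available packet range.  */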
/* The "maintenance btrace clear-packet-history" command. */
static void
maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
{
struct btrace_thread_info *btinfo;
struct thread_info *tp;
if (args != NULL && *args != 0)
error (_("Invalid argument."));
tp = find_thread_ptid (inferior_ptid);
if (tp == NULL)
error (_("No thread."));
btinfo = &tp->btrace;
/* The maint data must be cleared first - it depends on BTINFO->DATA.  */
btrace_maint_clear (btinfo);
btrace_data_clear (&btinfo->data);
}
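/* Note the difference from "maint btrace clear" below: the command above
   discards only the raw trace data and the packet list derived from it,
   keeping the computed execution history, whereas btrace_clear drops
   everything.  */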
/* The "maintenance btrace clear" command. */
static void
maint_btrace_clear_cmd (char *args, int from_tty)
{
struct btrace_thread_info *btinfo;
struct thread_info *tp;
if (args != NULL && *args != 0)
error (_("Invalid argument."));
tp = find_thread_ptid (inferior_ptid);
if (tp == NULL)
error (_("No thread."));
btrace_clear (tp);
}
/* The "maintenance btrace" command. */
static void
maint_btrace_cmd (char *args, int from_tty)
{
help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
gdb_stdout);
}
/* The "maintenance set btrace" command. */
static void
maint_btrace_set_cmd (char *args, int from_tty)
{
help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
gdb_stdout);
}
/* The "maintenance show btrace" command. */
static void
maint_btrace_show_cmd (char *args, int from_tty)
{
help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
all_commands, gdb_stdout);
}
/* The "maintenance set btrace pt" command. */
static void
maint_btrace_pt_set_cmd (char *args, int from_tty)
{
help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
all_commands, gdb_stdout);
}
/* The "maintenance show btrace pt" command. */
static void
maint_btrace_pt_show_cmd (char *args, int from_tty)
{
help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
all_commands, gdb_stdout);
}
/* The "maintenance info btrace" command. */
static void
maint_info_btrace_cmd (char *args, int from_tty)
{
struct btrace_thread_info *btinfo;
struct thread_info *tp;
const struct btrace_config *conf;
if (args != NULL && *args != 0)
error (_("Invalid argument."));
tp = find_thread_ptid (inferior_ptid);
if (tp == NULL)
error (_("No thread."));
btinfo = &tp->btrace;
conf = btrace_conf (btinfo);
if (conf == NULL)
error (_("No btrace configuration."));
printf_unfiltered (_("Format: %s.\n"),
btrace_format_string (conf->format));
switch (conf->format)
{
default:
break;
case BTRACE_FORMAT_BTS:
printf_unfiltered (_("Number of packets: %u.\n"),
VEC_length (btrace_block_s,
btinfo->data.variant.bts.blocks));
break;
#if defined (HAVE_LIBIPT)
case BTRACE_FORMAT_PT:
{
struct pt_version version;
version = pt_library_version ();
printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
version.minor, version.build,
version.ext != NULL ? version.ext : "");
btrace_maint_update_pt_packets (btinfo);
printf_unfiltered (_("Number of packets: %u.\n"),
VEC_length (btrace_pt_packet_s,
btinfo->maint.variant.pt.packets));
}
break;
#endif /* defined (HAVE_LIBIPT) */
}
}
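/* Example output (the numbers are illustrative):

     (gdb) maint info btrace
     Format: pt.
     Version: 2.0.0.
     Number of packets: 1042.

   For the BTS format only the packet count follows the format line; for
   other formats just the format line is printed.  */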
/* The "maint show btrace pt skip-pad" show value function. */
static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
struct cmd_list_element *c,
const char *value)
{
fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
/* Initialize btrace maintenance commands. */
void _initialize_btrace (void);
void
_initialize_btrace (void)
{
add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
_("Info about branch tracing data."), &maintenanceinfolist);
add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
_("Branch tracing maintenance commands."),
&maint_btrace_cmdlist, "maintenance btrace ",
0, &maintenancelist);
add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
&maint_btrace_set_cmdlist, "maintenance set btrace ",
0, &maintenance_set_cmdlist);
add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
&maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
0, &maint_btrace_set_cmdlist);
add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
&maint_btrace_show_cmdlist, "maintenance show btrace ",
0, &maintenance_show_cmdlist);
add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
&maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
0, &maint_btrace_show_cmdlist);
add_setshow_boolean_cmd ("skip-pad", class_maintenance,
&maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
NULL, show_maint_btrace_pt_skip_pad,
&maint_btrace_pt_set_cmdlist,
&maint_btrace_pt_show_cmdlist);
add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
_("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument, print the ten packets before the previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments separated by a comma specify the starting and ending packets to \
print.\n\
When preceded by '+'/'-', the second argument specifies the distance from the \
first.\n"),
&maint_btrace_cmdlist);
add_cmd ("clear-packet-history", class_maintenance,
maint_btrace_clear_packet_history_cmd,
_("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n\
"),
&maint_btrace_cmdlist);
add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
_("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n\
"),
&maint_btrace_cmdlist);
}
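/* A typical debugging session with these commands might look like this
   (illustrative, assuming a btrace recording is active):

     (gdb) record btrace
     (gdb) maint info btrace
     (gdb) maint btrace packet-history 0,+20
     (gdb) maint set btrace pt skip-pad off
     (gdb) maint btrace clear-packet-history
     (gdb) maint btrace clear

   "maint btrace clear" discards both the raw trace and the execution
   history; the next "record" command fetches the branch trace anew.  */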