mirror of
https://sourceware.org/git/binutils-gdb.git
synced 2024-11-23 18:14:13 +08:00
btrace: Use binary search to find instruction.
Currently, btrace_find_insn_by_number will iterate over all function call segments to find the one that contains the needed instruction. This linear search is too slow for the upcoming Python bindings that will use this function to access instructions. This patch introduces a vector in struct btrace_thread_info that holds pointers to all recorded function segments and allows to use binary search. The proper solution is to turn the underlying tree into a vector of objects and use indices for access. This requires more work. A patch set is currently being worked on and will be published later. Signed-off-by: Tim Wiederhake <tim.wiederhake@intel.com> gdb/ChangeLog: * btrace.c (btrace_fetch): Copy function call segments pointer into a vector. (btrace_clear): Clear the vector. (btrace_find_insn_by_number): Use binary search to find the correct function call segment. * btrace.h (brace_fun_p): New typedef. (struct btrace_thread_info) <functions>: New field. Change-Id: I8a7f67e80bfe4ff62c4192f74a2153a70bf2a035
This commit is contained in:
parent
508352a9bf
commit
fdd2bd920b
51
gdb/btrace.c
51
gdb/btrace.c
@@ -1839,13 +1839,19 @@ btrace_fetch (struct thread_info *tp)
|
|||||||
/* Compute the trace, provided we have any. */
|
/* Compute the trace, provided we have any. */
|
||||||
if (!btrace_data_empty (&btrace))
|
if (!btrace_data_empty (&btrace))
|
||||||
{
|
{
|
||||||
|
struct btrace_function *bfun;
|
||||||
|
|
||||||
/* Store the raw trace data. The stored data will be cleared in
|
/* Store the raw trace data. The stored data will be cleared in
|
||||||
btrace_clear, so we always append the new trace. */
|
btrace_clear, so we always append the new trace. */
|
||||||
btrace_data_append (&btinfo->data, &btrace);
|
btrace_data_append (&btinfo->data, &btrace);
|
||||||
btrace_maint_clear (btinfo);
|
btrace_maint_clear (btinfo);
|
||||||
|
|
||||||
|
VEC_truncate (btrace_fun_p, btinfo->functions, 0);
|
||||||
btrace_clear_history (btinfo);
|
btrace_clear_history (btinfo);
|
||||||
btrace_compute_ftrace (tp, &btrace);
|
btrace_compute_ftrace (tp, &btrace);
|
||||||
|
|
||||||
|
for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next)
|
||||||
|
VEC_safe_push (btrace_fun_p, btinfo->functions, bfun);
|
||||||
}
|
}
|
||||||
|
|
||||||
do_cleanups (cleanup);
|
do_cleanups (cleanup);
|
||||||
@@ -1868,6 +1874,8 @@ btrace_clear (struct thread_info *tp)
|
|||||||
|
|
||||||
btinfo = &tp->btrace;
|
btinfo = &tp->btrace;
|
||||||
|
|
||||||
|
VEC_free (btrace_fun_p, btinfo->functions);
|
||||||
|
|
||||||
it = btinfo->begin;
|
it = btinfo->begin;
|
||||||
while (it != NULL)
|
while (it != NULL)
|
||||||
{
|
{
|
||||||
@@ -2458,20 +2466,45 @@ btrace_find_insn_by_number (struct btrace_insn_iterator *it,
|
|||||||
unsigned int number)
|
unsigned int number)
|
||||||
{
|
{
|
||||||
const struct btrace_function *bfun;
|
const struct btrace_function *bfun;
|
||||||
|
unsigned int upper, lower;
|
||||||
|
|
||||||
|
if (VEC_empty (btrace_fun_p, btinfo->functions))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
lower = 0;
|
||||||
|
bfun = VEC_index (btrace_fun_p, btinfo->functions, lower);
|
||||||
|
if (number < bfun->insn_offset)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
upper = VEC_length (btrace_fun_p, btinfo->functions) - 1;
|
||||||
|
bfun = VEC_index (btrace_fun_p, btinfo->functions, upper);
|
||||||
|
if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
/* We assume that there are no holes in the numbering. */
|
||||||
|
for (;;)
|
||||||
|
{
|
||||||
|
const unsigned int average = lower + (upper - lower) / 2;
|
||||||
|
|
||||||
|
bfun = VEC_index (btrace_fun_p, btinfo->functions, average);
|
||||||
|
|
||||||
|
if (number < bfun->insn_offset)
|
||||||
|
{
|
||||||
|
upper = average - 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
|
||||||
|
{
|
||||||
|
lower = average + 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
|
|
||||||
if (bfun->insn_offset <= number)
|
|
||||||
break;
|
break;
|
||||||
|
}
|
||||||
if (bfun == NULL)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
if (bfun->insn_offset + ftrace_call_num_insn (bfun) <= number)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
it->function = bfun;
|
it->function = bfun;
|
||||||
it->index = number - bfun->insn_offset;
|
it->index = number - bfun->insn_offset;
|
||||||
|
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -187,6 +187,9 @@ struct btrace_function
|
|||||||
btrace_function_flags flags;
|
btrace_function_flags flags;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
typedef struct btrace_function *btrace_fun_p;
|
||||||
|
DEF_VEC_P (btrace_fun_p);
|
||||||
|
|
||||||
/* A branch trace instruction iterator. */
|
/* A branch trace instruction iterator. */
|
||||||
struct btrace_insn_iterator
|
struct btrace_insn_iterator
|
||||||
{
|
{
|
||||||
@@ -337,6 +340,10 @@ struct btrace_thread_info
|
|||||||
struct btrace_function *begin;
|
struct btrace_function *begin;
|
||||||
struct btrace_function *end;
|
struct btrace_function *end;
|
||||||
|
|
||||||
|
/* Vector of pointer to decoded function segments. These are in execution
|
||||||
|
order with the first element == BEGIN and the last element == END. */
|
||||||
|
VEC (btrace_fun_p) *functions;
|
||||||
|
|
||||||
/* The function level offset. When added to each function's LEVEL,
|
/* The function level offset. When added to each function's LEVEL,
|
||||||
this normalizes the function levels such that the smallest level
|
this normalizes the function levels such that the smallest level
|
||||||
becomes zero. */
|
becomes zero. */
|
||||||
|
Loading…
Reference in New Issue
Block a user