This is the mail archive of the
gdb-patches@sourceware.org
mailing list for the GDB project.
[PATCH 02/11] btrace: Change parameters to use btrace_thread_info.
- From: Tim Wiederhake <tim dot wiederhake at intel dot com>
- To: gdb-patches at sourceware dot org
- Cc: markus dot t dot metzger at intel dot com
- Date: Fri, 17 Feb 2017 14:26:20 +0100
- Subject: [PATCH 02/11] btrace: Change parameters to use btrace_thread_info.
- Authentication-results: sourceware.org; auth=none
- References: <1487337989-6367-1-git-send-email-tim.wiederhake@intel.com>
This prepares the transition from function call segment pointers to indices in
a vector.
2017-02-17 Tim Wiederhake <tim.wiederhake@intel.com>
gdb/ChangeLog:
* btrace.c (ftrace_new_function, ftrace_fixup_caller, ftrace_new_call,
ftrace_new_tailcall, ftrace_find_caller, ftrace_find_call,
ftrace_new_return, ftrace_new_switch, ftrace_new_gap,
ftrace_update_function, ftrace_update_insns, ftrace_connect_bfun,
ftrace_connect_backtrace, ftrace_bridge_gap, btrace_compute_ftrace_bts,
ftrace_add_pt, btrace_compute_ftrace_pt): Change to use struct
btrace_thread_info * as parameter. Adjust comments where necessary.
---
gdb/btrace.c | 138 ++++++++++++++++++++++++++++++++---------------------------
1 file changed, 75 insertions(+), 63 deletions(-)
diff --git a/gdb/btrace.c b/gdb/btrace.c
index 14a16a2..da8e0f7 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -202,17 +202,18 @@ ftrace_function_switched (const struct btrace_function *bfun,
return 0;
}
-/* Allocate and initialize a new branch trace function segment.
- PREV is the chronologically preceding function segment.
- MFUN and FUN are the symbol information we have for this function. */
+/* Allocate and initialize a new branch trace function segment at the end of
+ the trace. MFUN and FUN are the symbol information we have for this
+ function. */
static struct btrace_function *
-ftrace_new_function (struct btrace_function *prev,
+ftrace_new_function (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *bfun;
+ struct btrace_function *prev, *bfun;
+ prev = btinfo->end;
bfun = XCNEW (struct btrace_function);
bfun->msym = mfun;
@@ -258,7 +259,8 @@ ftrace_update_caller (struct btrace_function *bfun,
/* Fix up the caller for all segments of a function. */
static void
-ftrace_fixup_caller (struct btrace_function *bfun,
+ftrace_fixup_caller (struct btrace_thread_info *btinfo,
+ struct btrace_function *bfun,
struct btrace_function *caller,
enum btrace_function_flag flags)
{
@@ -275,18 +277,17 @@ ftrace_fixup_caller (struct btrace_function *bfun,
}
/* Add a new function segment for a call.
- CALLER is the chronologically preceding function segment.
MFUN and FUN are the symbol information we have for this function. */
static struct btrace_function *
-ftrace_new_call (struct btrace_function *caller,
+ftrace_new_call (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
struct btrace_function *bfun;
- bfun = ftrace_new_function (caller, mfun, fun);
- bfun->up = caller;
+ bfun = ftrace_new_function (btinfo, mfun, fun);
+ bfun->up = btinfo->end;
bfun->level += 1;
ftrace_debug (bfun, "new call");
@@ -295,18 +296,17 @@ ftrace_new_call (struct btrace_function *caller,
}
/* Add a new function segment for a tail call.
- CALLER is the chronologically preceding function segment.
MFUN and FUN are the symbol information we have for this function. */
static struct btrace_function *
-ftrace_new_tailcall (struct btrace_function *caller,
+ftrace_new_tailcall (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
struct btrace_function *bfun;
- bfun = ftrace_new_function (caller, mfun, fun);
- bfun->up = caller;
+ bfun = ftrace_new_function (btinfo, mfun, fun);
+ bfun->up = btinfo->end;
bfun->level += 1;
bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
@@ -331,7 +331,8 @@ ftrace_get_caller (struct btrace_function *bfun)
symbol information. */
static struct btrace_function *
-ftrace_find_caller (struct btrace_function *bfun,
+ftrace_find_caller (struct btrace_thread_info *btinfo,
+ struct btrace_function *bfun,
struct minimal_symbol *mfun,
struct symbol *fun)
{
@@ -353,7 +354,8 @@ ftrace_find_caller (struct btrace_function *bfun,
tail calls ending with a jump). */
static struct btrace_function *
-ftrace_find_call (struct btrace_function *bfun)
+ftrace_find_call (struct btrace_thread_info *btinfo,
+ struct btrace_function *bfun)
{
for (; bfun != NULL; bfun = bfun->up)
{
@@ -373,21 +375,21 @@ ftrace_find_call (struct btrace_function *bfun)
}
/* Add a continuation segment for a function into which we return.
- PREV is the chronologically preceding function segment.
MFUN and FUN are the symbol information we have for this function. */
static struct btrace_function *
-ftrace_new_return (struct btrace_function *prev,
+ftrace_new_return (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *bfun, *caller;
+ struct btrace_function *prev, *bfun, *caller;
- bfun = ftrace_new_function (prev, mfun, fun);
+ prev = btinfo->end;
+ bfun = ftrace_new_function (btinfo, mfun, fun);
/* It is important to start at PREV's caller. Otherwise, we might find
PREV itself, if PREV is a recursive function. */
- caller = ftrace_find_caller (prev->up, mfun, fun);
+ caller = ftrace_find_caller (btinfo, prev->up, mfun, fun);
if (caller != NULL)
{
/* The caller of PREV is the preceding btrace function segment in this
@@ -412,7 +414,7 @@ ftrace_new_return (struct btrace_function *prev,
wrong or that the call is simply not included in the trace. */
/* Let's search for some actual call. */
- caller = ftrace_find_call (prev->up);
+ caller = ftrace_find_call (btinfo, prev->up);
if (caller == NULL)
{
/* There is no call in PREV's back trace. We assume that the
@@ -426,7 +428,7 @@ ftrace_new_return (struct btrace_function *prev,
bfun->level = prev->level - 1;
/* Fix up the call stack for PREV. */
- ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
+ ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);
ftrace_debug (bfun, "new return - no caller");
}
@@ -452,19 +454,19 @@ ftrace_new_return (struct btrace_function *prev,
}
/* Add a new function segment for a function switch.
- PREV is the chronologically preceding function segment.
MFUN and FUN are the symbol information we have for this function. */
static struct btrace_function *
-ftrace_new_switch (struct btrace_function *prev,
+ftrace_new_switch (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *bfun;
+ struct btrace_function *prev, *bfun;
/* This is an unexplained function switch. We can't really be sure about the
call stack, yet the best I can think of right now is to preserve it. */
- bfun = ftrace_new_function (prev, mfun, fun);
+ prev = btinfo->end;
+ bfun = ftrace_new_function (btinfo, mfun, fun);
bfun->up = prev->up;
bfun->flags = prev->flags;
@@ -474,20 +476,21 @@ ftrace_new_switch (struct btrace_function *prev,
}
/* Add a new function segment for a gap in the trace due to a decode error.
- PREV is the chronologically preceding function segment.
ERRCODE is the format-specific error code. */
static struct btrace_function *
-ftrace_new_gap (struct btrace_function *prev, int errcode)
+ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
{
- struct btrace_function *bfun;
+ struct btrace_function *prev, *bfun;
+
+ prev = btinfo->end;
/* We hijack prev if it was empty. */
if (prev != NULL && prev->errcode == 0
&& VEC_empty (btrace_insn_s, prev->insn))
bfun = prev;
else
- bfun = ftrace_new_function (prev, NULL, NULL);
+ bfun = ftrace_new_function (btinfo, NULL, NULL);
bfun->errcode = errcode;
@@ -496,17 +499,20 @@ ftrace_new_gap (struct btrace_function *prev, int errcode)
return bfun;
}
-/* Update BFUN with respect to the instruction at PC. This may create new
- function segments.
+/* Update the current function call segment at the end of the trace with
+ respect to the instruction at PC. This may create new function segments.
Return the chronologically latest function segment, never NULL. */
static struct btrace_function *
-ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
+ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
{
struct bound_minimal_symbol bmfun;
struct minimal_symbol *mfun;
struct symbol *fun;
struct btrace_insn *last;
+ struct btrace_function *bfun;
+
+ bfun = btinfo->end;
/* Try to determine the function we're in. We use both types of symbols
to avoid surprises when we sometimes get a full symbol and sometimes
@@ -520,7 +526,7 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
/* If we didn't have a function or if we had a gap before, we create one. */
if (bfun == NULL || bfun->errcode != 0)
- return ftrace_new_function (bfun, mfun, fun);
+ return ftrace_new_function (btinfo, mfun, fun);
/* Check the last instruction, if we have one.
We do this check first, since it allows us to fill in the call stack
@@ -548,9 +554,9 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
different frame id's. This will confuse stepping. */
fname = ftrace_print_function_name (bfun);
if (strcmp (fname, "_dl_runtime_resolve") == 0)
- return ftrace_new_tailcall (bfun, mfun, fun);
+ return ftrace_new_tailcall (btinfo, mfun, fun);
- return ftrace_new_return (bfun, mfun, fun);
+ return ftrace_new_return (btinfo, mfun, fun);
}
case BTRACE_INSN_CALL:
@@ -558,7 +564,7 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
if (last->pc + last->size == pc)
break;
- return ftrace_new_call (bfun, mfun, fun);
+ return ftrace_new_call (btinfo, mfun, fun);
case BTRACE_INSN_JUMP:
{
@@ -568,13 +574,13 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
/* A jump to the start of a function is (typically) a tail call. */
if (start == pc)
- return ftrace_new_tailcall (bfun, mfun, fun);
+ return ftrace_new_tailcall (btinfo, mfun, fun);
/* If we can't determine the function for PC, we treat a jump at
the end of the block as tail call if we're switching functions
and as an intra-function branch if we don't. */
if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
- return ftrace_new_tailcall (bfun, mfun, fun);
+ return ftrace_new_tailcall (btinfo, mfun, fun);
break;
}
@@ -589,18 +595,21 @@ ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
ftrace_print_function_name (bfun),
ftrace_print_filename (bfun));
- return ftrace_new_switch (bfun, mfun, fun);
+ return ftrace_new_switch (btinfo, mfun, fun);
}
return bfun;
}
-/* Add the instruction at PC to BFUN's instructions. */
+/* Add the instruction at PC to the instructions of the current function call
+ segment at the end of the trace. */
static void
-ftrace_update_insns (struct btrace_function *bfun,
+ftrace_update_insns (struct btrace_thread_info *btinfo,
const struct btrace_insn *insn)
{
+ struct btrace_function *bfun = btinfo->end;
+
VEC_safe_push (btrace_insn_s, bfun->insn, insn);
if (record_debug > 1)
@@ -704,7 +713,8 @@ ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
ftrace_connect_backtrace. */
static void
-ftrace_connect_bfun (struct btrace_function *prev,
+ftrace_connect_bfun (struct btrace_thread_info *btinfo,
+ struct btrace_function *prev,
struct btrace_function *next)
{
DEBUG_FTRACE ("connecting...");
@@ -727,7 +737,7 @@ ftrace_connect_bfun (struct btrace_function *prev,
if (next->up != NULL)
{
DEBUG_FTRACE ("using next's callers");
- ftrace_fixup_caller (prev, next->up, next->flags);
+ ftrace_fixup_caller (btinfo, prev, next->up, next->flags);
}
}
else if (next->up == NULL)
@@ -735,7 +745,7 @@ ftrace_connect_bfun (struct btrace_function *prev,
if (prev->up != NULL)
{
DEBUG_FTRACE ("using prev's callers");
- ftrace_fixup_caller (next, prev->up, prev->flags);
+ ftrace_fixup_caller (btinfo, next, prev->up, prev->flags);
}
}
else
@@ -761,7 +771,7 @@ ftrace_connect_bfun (struct btrace_function *prev,
DEBUG_FTRACE ("adding prev's tail calls to next");
- ftrace_fixup_caller (next, prev->up, prev->flags);
+ ftrace_fixup_caller (btinfo, next, prev->up, prev->flags);
for (prev = prev->up; prev != NULL; prev = prev->up)
{
@@ -772,7 +782,7 @@ ftrace_connect_bfun (struct btrace_function *prev,
ftrace_debug (prev, "..top");
ftrace_debug (caller, "..up");
- ftrace_fixup_caller (prev, caller, flags);
+ ftrace_fixup_caller (btinfo, prev, caller, flags);
/* If we skipped any tail calls, this may move CALLER to a
different function level.
@@ -803,7 +813,8 @@ ftrace_connect_bfun (struct btrace_function *prev,
ftrace_match_backtrace. */
static void
-ftrace_connect_backtrace (struct btrace_function *lhs,
+ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
+ struct btrace_function *lhs,
struct btrace_function *rhs)
{
while (lhs != NULL && rhs != NULL)
@@ -819,7 +830,7 @@ ftrace_connect_backtrace (struct btrace_function *lhs,
lhs = ftrace_get_caller (lhs);
rhs = ftrace_get_caller (rhs);
- ftrace_connect_bfun (prev, next);
+ ftrace_connect_bfun (btinfo, prev, next);
}
}
@@ -829,7 +840,8 @@ ftrace_connect_backtrace (struct btrace_function *lhs,
Returns non-zero if the gap could be bridged, zero otherwise. */
static int
-ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
+ftrace_bridge_gap (struct btrace_thread_info *btinfo,
+ struct btrace_function *lhs, struct btrace_function *rhs,
int min_matches)
{
struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
@@ -877,7 +889,7 @@ ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
BEST_L to BEST_R as they will already be on the same level. */
ftrace_fixup_level (rhs, best_l->level - best_r->level);
- ftrace_connect_backtrace (best_l, best_r);
+ ftrace_connect_backtrace (btinfo, best_l, best_r);
return best_matches;
}
@@ -935,7 +947,7 @@ btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
if (rhs == NULL)
continue;
- bridged = ftrace_bridge_gap (lhs, rhs, min_matches);
+ bridged = ftrace_bridge_gap (&tp->btrace, lhs, rhs, min_matches);
/* Keep track of gaps we were not able to bridge and try again.
If we just pushed them to the end of GAPS we would risk an
@@ -1004,7 +1016,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
if (block->end < pc)
{
/* Indicate the gap in the trace. */
- btinfo->end = ftrace_new_gap (btinfo->end, BDE_BTS_OVERFLOW);
+ btinfo->end = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);
if (btinfo->begin == NULL)
btinfo->begin = btinfo->end;
@@ -1017,7 +1029,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
break;
}
- btinfo->end = ftrace_update_function (btinfo->end, pc);
+ btinfo->end = ftrace_update_function (btinfo, pc);
if (btinfo->begin == NULL)
btinfo->begin = btinfo->end;
@@ -1041,7 +1053,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
insn.iclass = ftrace_classify_insn (gdbarch, pc);
insn.flags = 0;
- ftrace_update_insns (btinfo->end, &insn);
+ ftrace_update_insns (btinfo, &insn);
/* We're done once we pushed the instruction at the end. */
if (block->end == pc)
@@ -1052,7 +1064,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
{
/* Indicate the gap in the trace. We just added INSN so we're
not at the beginning. */
- btinfo->end = ftrace_new_gap (btinfo->end, BDE_BTS_INSN_SIZE);
+ btinfo->end = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);
VEC_safe_push (bfun_s, *gaps, btinfo->end);
@@ -1158,7 +1170,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
from some other instruction. Indicate this as a trace gap. */
if (insn.enabled)
{
- btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_DISABLED);
+ btinfo->end = ftrace_new_gap (btinfo, BDE_PT_DISABLED);
VEC_safe_push (bfun_s, *gaps, btinfo->end);
@@ -1173,7 +1185,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
/* Indicate trace overflows. */
if (insn.resynced)
{
- btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_OVERFLOW);
+ btinfo->end = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);
if (btinfo->begin == NULL)
btinfo->begin = btinfo->end;
@@ -1186,7 +1198,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
btinfo->end->insn_offset - 1, offset, insn.ip);
}
- upd = ftrace_update_function (btinfo->end, insn.ip);
+ upd = ftrace_update_function (btinfo, insn.ip);
if (upd != btinfo->end)
{
btinfo->end = upd;
@@ -1203,14 +1215,14 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
btinsn.iclass = pt_reclassify_insn (insn.iclass);
btinsn.flags = pt_btrace_insn_flags (&insn);
- ftrace_update_insns (btinfo->end, &btinsn);
+ ftrace_update_insns (btinfo, &btinsn);
}
if (errcode == -pte_eos)
break;
/* Indicate the gap in the trace. */
- btinfo->end = ftrace_new_gap (btinfo->end, errcode);
+ btinfo->end = ftrace_new_gap (btinfo, errcode);
if (btinfo->begin == NULL)
btinfo->begin = btinfo->end;
@@ -1342,7 +1354,7 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
/* Indicate a gap in the trace if we quit trace processing. */
if (error.reason == RETURN_QUIT && btinfo->end != NULL)
{
- btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
+ btinfo->end = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);
VEC_safe_push (bfun_s, *gaps, btinfo->end);
}
--
2.7.4