This is the mail archive of the
gdb-patches@sourceware.org
mailing list for the GDB project.
[PATCH 01/11] btrace: Use struct btrace_thread_info fields directly.
- From: Tim Wiederhake <tim dot wiederhake at intel dot com>
- To: gdb-patches at sourceware dot org
- Cc: markus dot t dot metzger at intel dot com
- Date: Fri, 17 Feb 2017 14:26:19 +0100
- Subject: [PATCH 01/11] btrace: Use struct btrace_thread_info fields directly.
- Authentication-results: sourceware.org; auth=none
- References: <1487337989-6367-1-git-send-email-tim.wiederhake@intel.com>
This will later allow us to remove the BEGIN and END fields.
2017-02-17 Tim Wiederhake <tim.wiederhake@intel.com>
gdb/ChangeLog:
* btrace.c (btrace_compute_ftrace_bts, ftrace_add_pt): Use struct
btrace_thread_info fields directly.
(btrace_compute_ftrace_pt): Adjust for change in ftrace_add_pt.
---
gdb/btrace.c | 94 +++++++++++++++++++++++++++---------------------------------
1 file changed, 43 insertions(+), 51 deletions(-)
diff --git a/gdb/btrace.c b/gdb/btrace.c
index 95dc7ab..14a16a2 100644
--- a/gdb/btrace.c
+++ b/gdb/btrace.c
@@ -976,16 +976,13 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
VEC (bfun_s) **gaps)
{
struct btrace_thread_info *btinfo;
- struct btrace_function *begin, *end;
struct gdbarch *gdbarch;
unsigned int blk;
int level;
gdbarch = target_gdbarch ();
btinfo = &tp->btrace;
- begin = btinfo->begin;
- end = btinfo->end;
- level = begin != NULL ? -btinfo->level : INT_MAX;
+ level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
blk = VEC_length (btrace_block_s, btrace->blocks);
while (blk != 0)
@@ -1007,27 +1004,27 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
if (block->end < pc)
{
/* Indicate the gap in the trace. */
- end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
- if (begin == NULL)
- begin = end;
+ btinfo->end = ftrace_new_gap (btinfo->end, BDE_BTS_OVERFLOW);
+ if (btinfo->begin == NULL)
+ btinfo->begin = btinfo->end;
- VEC_safe_push (bfun_s, *gaps, end);
+ VEC_safe_push (bfun_s, *gaps, btinfo->end);
warning (_("Recorded trace may be corrupted at instruction "
- "%u (pc = %s)."), end->insn_offset - 1,
+ "%u (pc = %s)."), btinfo->end->insn_offset - 1,
core_addr_to_string_nz (pc));
break;
}
- end = ftrace_update_function (end, pc);
- if (begin == NULL)
- begin = end;
+ btinfo->end = ftrace_update_function (btinfo->end, pc);
+ if (btinfo->begin == NULL)
+ btinfo->begin = btinfo->end;
/* Maintain the function level offset.
For all but the last block, we do it here. */
if (blk != 0)
- level = std::min (level, end->level);
+ level = std::min (level, btinfo->end->level);
size = 0;
TRY
@@ -1044,7 +1041,7 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
insn.iclass = ftrace_classify_insn (gdbarch, pc);
insn.flags = 0;
- ftrace_update_insns (end, &insn);
+ ftrace_update_insns (btinfo->end, &insn);
/* We're done once we pushed the instruction at the end. */
if (block->end == pc)
@@ -1055,12 +1052,12 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
{
/* Indicate the gap in the trace. We just added INSN so we're
not at the beginning. */
- end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
+ btinfo->end = ftrace_new_gap (btinfo->end, BDE_BTS_INSN_SIZE);
- VEC_safe_push (bfun_s, *gaps, end);
+ VEC_safe_push (bfun_s, *gaps, btinfo->end);
warning (_("Recorded trace may be incomplete at instruction %u "
- "(pc = %s)."), end->insn_offset - 1,
+ "(pc = %s)."), btinfo->end->insn_offset - 1,
core_addr_to_string_nz (pc));
break;
@@ -1075,13 +1072,10 @@ btrace_compute_ftrace_bts (struct thread_info *tp,
and is not really part of the execution history, it shouldn't
affect the level. */
if (blk == 0)
- level = std::min (level, end->level);
+ level = std::min (level, btinfo->end->level);
}
}
- btinfo->begin = begin;
- btinfo->end = end;
-
/* LEVEL is the minimal function level of all btrace function segments.
Define the global level offset to -LEVEL so all function levels are
normalized to start at zero. */
@@ -1126,16 +1120,13 @@ pt_btrace_insn_flags (const struct pt_insn *insn)
static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
- struct btrace_function **pbegin,
- struct btrace_function **pend, int *plevel,
- VEC (bfun_s) **gaps)
+ struct btrace_thread_info *btinfo,
+ int *plevel, VEC (bfun_s) **gaps)
{
- struct btrace_function *begin, *end, *upd;
+ struct btrace_function *upd;
uint64_t offset;
int errcode;
- begin = *pbegin;
- end = *pend;
for (;;)
{
struct btrace_insn btinsn;
@@ -1158,7 +1149,7 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
break;
/* Look for gaps in the trace - unless we're at the beginning. */
- if (begin != NULL)
+ if (btinfo->begin != NULL)
{
/* Tracing is disabled and re-enabled each time we enter the
kernel. Most times, we continue from the same instruction we
@@ -1167,69 +1158,70 @@ ftrace_add_pt (struct pt_insn_decoder *decoder,
from some other instruction. Indicate this as a trace gap. */
if (insn.enabled)
{
- *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
+ btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_DISABLED);
- VEC_safe_push (bfun_s, *gaps, end);
+ VEC_safe_push (bfun_s, *gaps, btinfo->end);
pt_insn_get_offset (decoder, &offset);
warning (_("Non-contiguous trace at instruction %u (offset "
"= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
- end->insn_offset - 1, offset, insn.ip);
+ btinfo->end->insn_offset - 1, offset, insn.ip);
}
}
/* Indicate trace overflows. */
if (insn.resynced)
{
- *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
- if (begin == NULL)
- *pbegin = begin = end;
+ btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_OVERFLOW);
+ if (btinfo->begin == NULL)
+ btinfo->begin = btinfo->end;
- VEC_safe_push (bfun_s, *gaps, end);
+ VEC_safe_push (bfun_s, *gaps, btinfo->end);
pt_insn_get_offset (decoder, &offset);
warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
- ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
- offset, insn.ip);
+ ", pc = 0x%" PRIx64 ")."),
+ btinfo->end->insn_offset - 1, offset, insn.ip);
}
- upd = ftrace_update_function (end, insn.ip);
- if (upd != end)
+ upd = ftrace_update_function (btinfo->end, insn.ip);
+ if (upd != btinfo->end)
{
- *pend = end = upd;
+ btinfo->end = upd;
- if (begin == NULL)
- *pbegin = begin = upd;
+ if (btinfo->begin == NULL)
+ btinfo->begin = upd;
}
/* Maintain the function level offset. */
- *plevel = std::min (*plevel, end->level);
+ *plevel = std::min (*plevel, btinfo->end->level);
btinsn.pc = (CORE_ADDR) insn.ip;
btinsn.size = (gdb_byte) insn.size;
btinsn.iclass = pt_reclassify_insn (insn.iclass);
btinsn.flags = pt_btrace_insn_flags (&insn);
- ftrace_update_insns (end, &btinsn);
+ ftrace_update_insns (btinfo->end, &btinsn);
}
if (errcode == -pte_eos)
break;
/* Indicate the gap in the trace. */
- *pend = end = ftrace_new_gap (end, errcode);
- if (begin == NULL)
- *pbegin = begin = end;
+ btinfo->end = ftrace_new_gap (btinfo->end, errcode);
+ if (btinfo->begin == NULL)
+ btinfo->begin = btinfo->end;
- VEC_safe_push (bfun_s, *gaps, end);
+ VEC_safe_push (bfun_s, *gaps, btinfo->end);
pt_insn_get_offset (decoder, &offset);
warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
- ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
- offset, insn.ip, pt_errstr (pt_errcode (errcode)));
+ ", pc = 0x%" PRIx64 "): %s."), errcode,
+ btinfo->end->insn_offset - 1, offset, insn.ip,
+ pt_errstr (pt_errcode (errcode)));
}
}
@@ -1343,7 +1335,7 @@ btrace_compute_ftrace_pt (struct thread_info *tp,
error (_("Failed to configure the Intel Processor Trace decoder: "
"%s."), pt_errstr (pt_errcode (errcode)));
- ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
+ ftrace_add_pt (decoder, btinfo, &level, gaps);
}
CATCH (error, RETURN_MASK_ALL)
{
--
2.7.4