This is the mail archive of the gdb-patches@sourceware.org mailing list for the GDB project.



[PATCH 08/16] linux, btrace: perf_event based branch tracing


From: Markus Metzger <markus.t.metzger@intel.com>

Implement branch tracing on Linux based on perf_event such that it can be shared
between gdb and gdbserver.

The actual btrace target ops will be implemented on top.
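
For reference, here is a rough sketch of how a caller (e.g. one of the btrace
target ops added later in this series) might drive the new interface.  Only the
linux_* functions and struct linux_btrace_block come from this patch;
collect_block and trace_thread_sketch are made-up names for illustration.

#include <stdio.h>
#include "linux-btrace.h"

/* Callback for linux_read_btrace; called once per trace block.  */
static int
collect_block (struct linux_btrace_block *block, void *arg)
{
  /* A real target op would convert BLOCK into its own representation;
     this sketch just prints the block's address range.  */
  printf ("block 0x%llx - 0x%llx\n", block->begin, block->end);
  return 0;  /* Returning non-zero aborts the read.  */
}

static void
trace_thread_sketch (ptid_t ptid)
{
  struct btrace_target_info *tinfo;

  if (!linux_supports_btrace ())
    return;

  tinfo = linux_enable_btrace (ptid);
  if (tinfo == NULL)
    return;

  /* ... let the thread run for a while ...  */

  if (linux_btrace_has_changed (tinfo))
    linux_read_btrace (tinfo, collect_block, NULL);

  linux_disable_btrace (tinfo);
}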

2012-05-23  Markus Metzger  <markus.t.metzger@intel.com>

  gdb/common/
  * linux-btrace.h: New file.
  * linux-btrace.c: New file.

  gdb/
  * Makefile.in: Add linux-btrace rules.

  gdb/gdbserver/
  * Makefile.in: Add linux-btrace rules.


---
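
A note on perf_event_skip_record below: on Linux, kernel addresses have the
most significant pointer bit set, so records that branch from or into kernel
space can be filtered with a simple bit test once the traced thread's pointer
size is known.  A minimal sketch of that test, assuming the same semantics as
tinfo->ptr_bits in the patch (is_kernel_address is a made-up helper name, not
part of the patch):

/* PTR_BITS is the pointer size in bits of the traced thread; 0 means
   unknown, in which case we keep the record.  */
static int
is_kernel_address (unsigned long long addr, int ptr_bits)
{
  if (ptr_bits == 0)
    return 0;

  /* Kernel addresses have the topmost pointer bit set.  */
  return (addr & (1ull << (ptr_bits - 1))) != 0;
}

A record is skipped when either bts.from or bts.to is a kernel address, which
is what perf_event_skip_record in linux-btrace.c implements.
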
 gdb/Makefile.in           |    7 +-
 gdb/common/linux-btrace.c |  368 +++++++++++++++++++++++++++++++++++++++++++++
 gdb/common/linux-btrace.h |   76 +++++++++
 gdb/gdbserver/Makefile.in |    6 +-
 4 files changed, 455 insertions(+), 2 deletions(-)
 create mode 100644 gdb/common/linux-btrace.c
 create mode 100644 gdb/common/linux-btrace.h

diff --git a/gdb/Makefile.in b/gdb/Makefile.in
index 5fa7c12..b3fdd4f 100644
--- a/gdb/Makefile.in
+++ b/gdb/Makefile.in
@@ -826,7 +826,8 @@ gnulib/import/extra/snippet/arg-nonnull.h gnulib/import/extra/snippet/c++defs.h
 gnulib/import/extra/snippet/warn-on-use.h \
 gnulib/import/stddef.in.h gnulib/import/inttypes.in.h inline-frame.h skip.h \
 common/common-utils.h common/xml-utils.h common/buffer.h common/ptid.h \
-common/linux-osdata.h gdb-dlfcn.h auto-load.h probe.h stap-probe.h
+common/linux-osdata.h gdb-dlfcn.h auto-load.h probe.h stap-probe.h \
+common/linux-btrace.h
 
 # Header files that already have srcdir in them, or which are in objdir.
 
@@ -1930,6 +1931,10 @@ vec.o: ${srcdir}/common/vec.c
 	$(COMPILE) $(srcdir)/common/vec.c
 	$(POSTCOMPILE)
 
+linux-btrace.o: ${srcdir}/common/linux-btrace.c
+	$(COMPILE) $(srcdir)/common/linux-btrace.c
+	$(POSTCOMPILE)
+
 #
 # gdb/tui/ dependencies
 #
diff --git a/gdb/common/linux-btrace.c b/gdb/common/linux-btrace.c
new file mode 100644
index 0000000..76bca75
--- /dev/null
+++ b/gdb/common/linux-btrace.c
@@ -0,0 +1,368 @@
+/* Linux-dependent part of branch trace support for GDB, and GDBserver.
+
+   Copyright (C) 2012 Free Software Foundation, Inc.
+
+   Contributed by Intel Corp. <markus.t.metzger@intel.com>
+
+   This file is part of GDB.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
+
+#include "linux-btrace.h"
+#include "common-utils.h"
+#include <errno.h>
+
+#if HAVE_LINUX_PERF_EVENT_H
+
+#include <string.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/mman.h>
+#include <sys/user.h>
+
+typedef unsigned char  byte;
+
+/* A branch trace record in perf_event.  */
+struct perf_event_bts
+{
+  unsigned long long from;
+  unsigned long long to;
+};
+
+/* A perf_event branch trace sample.  */
+struct perf_event_sample
+{
+  struct perf_event_header header;
+  struct perf_event_bts    bts;
+};
+
+static inline volatile struct perf_event_mmap_page *
+perf_event_header (struct btrace_target_info* tinfo)
+{
+  return tinfo->buffer;
+}
+
+static inline size_t
+perf_event_mmap_size (const struct btrace_target_info *tinfo)
+{
+  /* The branch trace buffer is preceded by a configuration page.  */
+  return ((tinfo->size + 1) * PAGE_SIZE);
+}
+
+static inline size_t
+perf_event_buffer_size (struct btrace_target_info* tinfo)
+{
+  return (tinfo->size * PAGE_SIZE);
+}
+
+static inline const byte *
+perf_event_buffer_begin (struct btrace_target_info* tinfo)
+{
+  return ((const byte *) tinfo->buffer) + PAGE_SIZE;
+}
+
+static inline const byte *
+perf_event_buffer_end (struct btrace_target_info* tinfo)
+{
+  return perf_event_buffer_begin (tinfo) + perf_event_buffer_size (tinfo);
+}
+
+static inline int
+perf_event_skip_record (struct btrace_target_info* tinfo,
+                        const struct perf_event_bts *bts)
+{
+  if (tinfo->ptr_bits)
+    {
+      int shift = tinfo->ptr_bits - 1;
+
+      /* Branch trace records branches from kernel space to user space.  */
+      if (bts->from & (1ull << shift))
+        return 1;
+
+      /* Branch trace records branches from user space to kernel space.  */
+      if (bts->to & (1ull << shift))
+        return 1;
+    }
+
+  return 0;
+}
+
+static inline int
+perf_event_check_sample (const struct perf_event_sample *sample)
+{
+  if (sample->header.type != PERF_RECORD_SAMPLE)
+    return EINVAL;
+
+  if (sample->header.size != sizeof (*sample))
+    return EINVAL;
+
+  return 0;
+}
+
+/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
+   and to addresses (plus some header).
+
+   Start points into that buffer at the next sample position.
+   We read the collected samples backwards from start.
+
+   While reading the samples, we convert the information into a list of blocks.
+   For two adjacent samples s1 and s2, we form a block b such that b.begin =
+   s1.to and b.end = s2.from.
+
+   In case the buffer overflows during sampling, samples may be split.  */
+static int
+perf_event_read_bts (struct btrace_target_info* tinfo,
+                     const byte *begin, const byte *end, const byte *start,
+                     int (*fun) (struct linux_btrace_block *, void *),
+                     void *arg)
+{
+  struct perf_event_sample sample;
+  int read = 0, size = (end - begin), errcode = 0;
+  struct linux_btrace_block block = { 0, 0 };
+
+  if (start < begin)
+    return EINVAL;
+
+  if (end < start)
+    return EINVAL;
+
+  /* The buffer may contain a partial record as its last entry (i.e. when the
+     buffer size is not a multiple of the sample size).  */
+  read = sizeof (sample) - 1;
+
+  for (; read < size; read += sizeof (sample))
+    {
+      const struct perf_event_sample *psample;
+
+      /* Find the next perf_event sample.  */
+      start -= sizeof (sample);
+      if (begin <= start)
+        psample = (const struct perf_event_sample *) start;
+      else
+        {
+          int missing = (begin - start);
+          start = (end - missing);
+
+          if (missing == sizeof (sample))
+            psample = (const struct perf_event_sample *) start;
+          else
+            {
+              byte *stack = (byte *) &sample;
+
+              memcpy (stack, start, missing);
+              memcpy (stack + missing, begin, sizeof (sample) - missing);
+
+              psample = &sample;
+            }
+        }
+
+      errcode = perf_event_check_sample (psample);
+      if (errcode)
+        break;
+
+      if (perf_event_skip_record (tinfo, &(psample->bts)))
+        continue;
+
+      /* We found a valid sample, so we can complete the current block.  */
+      block.begin = psample->bts.to;
+
+      errcode = (*fun) (&block, arg);
+      if (errcode)
+        break;
+
+      /* Start the next block.  */
+      block.end = psample->bts.from;
+    }
+
+  return errcode;
+}
+
+int
+linux_supports_btrace (void)
+{
+  return 1;
+}
+
+int
+linux_btrace_has_changed (struct btrace_target_info *tinfo)
+{
+  volatile struct perf_event_mmap_page *header = perf_event_header (tinfo);
+  if (!header)
+    return 0;
+
+  return (header->data_head != tinfo->data_head);
+}
+
+struct btrace_target_info *
+linux_enable_btrace (ptid_t ptid)
+{
+  struct btrace_target_info *tinfo;
+  int pid;
+
+  tinfo = xzalloc (sizeof (*tinfo));
+  if (!tinfo)
+    {
+      errno = ENOMEM;
+      return NULL;
+    }
+
+  tinfo->attr.size = sizeof (tinfo->attr);
+
+  tinfo->attr.type = PERF_TYPE_HARDWARE;
+  tinfo->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
+  tinfo->attr.sample_period = 1;
+
+  /* We sample from and to addresses.  */
+  tinfo->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
+
+  tinfo->attr.exclude_kernel = 1;
+  tinfo->attr.exclude_hv = 1;
+  tinfo->attr.exclude_idle = 1;
+
+  tinfo->ptr_bits = 0;
+
+  pid = ptid_get_lwp (ptid);
+  if (!pid)
+    pid = ptid_get_pid (ptid);
+
+  tinfo->file = syscall (SYS_perf_event_open, &(tinfo->attr), pid, -1, -1, 0);
+  if (tinfo->file < 0)
+    goto err;
+
+  /* We hard-code the trace buffer size.
+     At some later time, we should make this configurable.  */
+  tinfo->size = 1;
+  tinfo->buffer = mmap (NULL, perf_event_mmap_size (tinfo),
+                        PROT_READ, MAP_SHARED, tinfo->file, 0);
+  if (tinfo->buffer == MAP_FAILED)
+    goto err_file;
+
+  return tinfo;
+
+err_file:
+  close (tinfo->file);
+
+err:
+  xfree (tinfo);
+  return NULL;
+}
+
+int
+linux_disable_btrace (struct btrace_target_info *tinfo)
+{
+  int errcode;
+
+  if (!tinfo)
+    return -EINVAL;
+
+  errcode = munmap (tinfo->buffer, perf_event_mmap_size (tinfo));
+  if (errcode)
+    return errno;
+
+  close (tinfo->file);
+  xfree (tinfo);
+
+  return 0;
+}
+
+int
+linux_read_btrace (struct btrace_target_info *tinfo,
+                   int (*fun) (struct linux_btrace_block *, void *),
+                   void *arg)
+{
+  volatile struct perf_event_mmap_page *header;
+  const byte *begin, *end, *start;
+  unsigned long data_head, retries = 5;
+  size_t buffer_size;
+  int errcode = 0;
+
+  if (!tinfo)
+    return EINVAL;
+
+  header = perf_event_header (tinfo);
+  if (!header)
+    return ENOSYS;
+
+  buffer_size = perf_event_buffer_size (tinfo);
+
+  /* We may need to retry reading the trace.  See below.  */
+  while (retries--)
+    {
+      data_head = header->data_head;
+
+      /* If there is new trace, let's read it.  */
+      if (data_head != tinfo->data_head)
+        {
+          /* Data_head keeps growing; the buffer itself is circular.  */
+          begin = perf_event_buffer_begin (tinfo);
+          start = begin + (data_head % buffer_size);
+
+          if (data_head <= buffer_size)
+            end = start;
+          else
+            end = perf_event_buffer_end (tinfo);
+
+          errcode = perf_event_read_bts (tinfo, begin, end, start, fun, arg);
+        }
+
+      /* The stopping thread notifies its ptracer before it is scheduled out.
+         On multi-core systems, the debugger might therefore run while the
+         kernel is still writing the last branch trace records.
+
+         Let's check whether the data head moved while we read the trace.  */
+      if (data_head == header->data_head)
+        break;
+    }
+
+  tinfo->data_head = data_head;
+
+  return errcode;
+}
+
+#else /* HAVE_LINUX_PERF_EVENT_H */
+
+int
+linux_supports_btrace (void)
+{
+  return 0;
+}
+
+int
+linux_btrace_has_changed (struct btrace_target_info *tinfo)
+{
+  return 0;
+}
+
+struct btrace_target_info *
+linux_enable_btrace (ptid_t ptid)
+{
+  errno = ENOSYS;
+  return NULL;
+}
+
+int
+linux_disable_btrace (struct btrace_target_info *tinfo)
+{
+  return ENOSYS;
+}
+
+int
+linux_read_btrace (struct btrace_target_info *tinfo,
+                   int (*fun) (struct linux_btrace_block *, void *),
+                   void *arg)
+{
+  return ENOSYS;
+}
+
+#endif /* HAVE_LINUX_PERF_EVENT_H */
diff --git a/gdb/common/linux-btrace.h b/gdb/common/linux-btrace.h
new file mode 100644
index 0000000..22f435a
--- /dev/null
+++ b/gdb/common/linux-btrace.h
@@ -0,0 +1,76 @@
+/* Linux-dependent part of branch trace support for GDB, and GDBserver.
+
+   Copyright (C) 2012 Free Software Foundation, Inc.
+
+   Contributed by Intel Corp. <markus.t.metzger@intel.com>
+
+   This file is part of GDB.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
+
+#ifndef LINUX_BTRACE_H
+#define LINUX_BTRACE_H
+
+#include "config.h"
+#include "ptid.h"
+#include <stddef.h>
+
+#if HAVE_LINUX_PERF_EVENT_H
+#  include <linux/perf_event.h>
+#endif
+
+/* A branch trace block on Linux.
+   GDB and GDBserver don't agree on the definition of CORE_ADDR.  In order to
+   allow sharing the perf_event access code, we stick to unsigned long long;
+   that's also what perf_event delivers.  */
+struct linux_btrace_block
+{
+  unsigned long long begin;
+  unsigned long long end;
+};
+
+/* Branch trace target information per thread.  */
+struct btrace_target_info
+{
+#if HAVE_LINUX_PERF_EVENT_H
+  /* The Linux perf_event configuration for collecting the branch trace.  */
+  struct perf_event_attr attr;
+
+  /* The mmap configuration mapping the branch trace perf_event buffer.
+
+     file      .. the file descriptor
+     buffer    .. the mmapped memory buffer
+     size      .. the buffer's size in pages without the configuration page
+     data_head .. the data head from the last read  */
+  int           file;
+  void         *buffer;
+  size_t        size;
+  unsigned long data_head;
+#endif /* HAVE_LINUX_PERF_EVENT_H */
+
+  /* The size of a pointer in bits for this thread.
+     The information is used to identify kernel addresses in order to skip
+     records from/to kernel space.  */
+  int ptr_bits;
+};
+
+extern int linux_supports_btrace (void);
+extern struct btrace_target_info *linux_enable_btrace (ptid_t);
+extern int linux_disable_btrace (struct btrace_target_info *);
+extern int linux_btrace_has_changed (struct btrace_target_info *);
+extern int linux_read_btrace (struct btrace_target_info *,
+                              int (*) (struct linux_btrace_block *, void *),
+                              void *);
+
+#endif /* LINUX_BTRACE_H */
diff --git a/gdb/gdbserver/Makefile.in b/gdb/gdbserver/Makefile.in
index 50786d5..1f256de 100644
--- a/gdb/gdbserver/Makefile.in
+++ b/gdb/gdbserver/Makefile.in
@@ -139,7 +139,7 @@ SFILES=	$(srcdir)/gdbreplay.c $(srcdir)/inferiors.c $(srcdir)/dll.c \
 	$(srcdir)/common/vec.c \
 	$(srcdir)/common/common-utils.c $(srcdir)/common/xml-utils.c \
 	$(srcdir)/common/linux-osdata.c $(srcdir)/common/ptid.c \
-	$(srcdir)/common/buffer.c
+	$(srcdir)/common/buffer.c $(srcdir)/common/linux-btrace.c
 
 DEPFILES = @GDBSERVER_DEPFILES@
 
@@ -408,6 +408,7 @@ signals_h = $(srcdir)/../../include/gdb/signals.h $(signals_def)
 ptid_h = $(srcdir)/../common/ptid.h
 ax_h = $(srcdir)/ax.h
 agent_h = $(srcdir)/../common/agent.h
+linux_btrace_h = $(srcdir)/../common/linux-btrace.h
 linux_osdata_h = $(srcdir)/../common/linux-osdata.h
 vec_h = $(srcdir)/../common/vec.h
 # Since everything must include server.h, we make that depend on
@@ -514,6 +515,9 @@ buffer.o: ../common/buffer.c $(server_h)
 agent.o: ../common/agent.c $(server_h) $(agent_h)
 	$(CC) -c $(CPPFLAGS) $(INTERNAL_CFLAGS) $< -DGDBSERVER
 
+linux-btrace.o: ../common/linux-btrace.c $(linux_btrace_h) $(server_h)
+	$(CC) -c $(CPPFLAGS) $(INTERNAL_CFLAGS) $< -DGDBSERVER
+
 # We build vasprintf with -DHAVE_CONFIG_H because we want that unit to
 # include our config.h file.  Otherwise, some system headers do not get
 # included, and the compiler emits a warning about implicitly defined
-- 
1.7.1

