This is the mail archive of the gdb-patches@sourceware.org mailing list for the GDB project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[PATCH 03/12] btrace, linux: add perf event buffer abstraction


Collect perf event buffer related fields from btrace_target_info into
a new struct perf_event_buffer.  Update functions that operated on the
buffer to take a struct perf_event_buffer pointer rather than a
btrace_target_info pointer.

We will need other perf event buffers with different content.

2014-07-14  Markus Metzger <markus.t.metzger@intel.com>

	* nat/linux-btrace.h (perf_event_buffer): New.
	(btrace_target_info) <buffer, size, data_head>: Replace with ...
	<bts>: ... this.
	* nat/linux-btrace.c (perf_event_header, perf_event_mmap_size)
	(perf_event_buffer_size, perf_event_buffer_begin)
	(perf_event_buffer_end): Update parameters.  Updated users.
	(perf_event_map_buffer, perf_event_unmap_buffer)
	(perf_event_new_data): New.
	(linux_enable_btrace): Call perf_event_map_buffer.
	(linux_disable_btrace): Call perf_event_unmap_buffer.
	(linux_btrace_has_changed): Remove.
---
 gdb/nat/linux-btrace.c | 117 ++++++++++++++++++++++++++++++-------------------
 gdb/nat/linux-btrace.h |  29 ++++++++----
 2 files changed, 93 insertions(+), 53 deletions(-)

diff --git a/gdb/nat/linux-btrace.c b/gdb/nat/linux-btrace.c
index 20b4ef0..f33a455 100644
--- a/gdb/nat/linux-btrace.c
+++ b/gdb/nat/linux-btrace.c
@@ -72,42 +72,90 @@ struct perf_event_sample
 /* Get the perf_event header.  */
 
 static inline volatile struct perf_event_mmap_page *
-perf_event_header (struct btrace_target_info* tinfo)
+perf_event_header (const struct perf_event_buffer *pevent)
 {
-  return tinfo->buffer;
+  return pevent->mem;
 }
 
 /* Get the size of the perf_event mmap buffer.  */
 
 static inline size_t
-perf_event_mmap_size (const struct btrace_target_info *tinfo)
+perf_event_mmap_size (const struct perf_event_buffer *pevent)
 {
   /* The branch trace buffer is preceded by a configuration page.  */
-  return (tinfo->size + 1) * PAGE_SIZE;
+  return (pevent->size + 1) * PAGE_SIZE;
 }
 
 /* Get the size of the perf_event buffer.  */
 
 static inline size_t
-perf_event_buffer_size (struct btrace_target_info* tinfo)
+perf_event_buffer_size (const struct perf_event_buffer *pevent)
 {
-  return tinfo->size * PAGE_SIZE;
+  return pevent->size * PAGE_SIZE;
 }
 
 /* Get the start address of the perf_event buffer.  */
 
 static inline const uint8_t *
-perf_event_buffer_begin (struct btrace_target_info* tinfo)
+perf_event_buffer_begin (const struct perf_event_buffer *pevent)
 {
-  return ((const uint8_t *) tinfo->buffer) + PAGE_SIZE;
+  return ((const uint8_t *) pevent->mem) + PAGE_SIZE;
 }
 
 /* Get the end address of the perf_event buffer.  */
 
 static inline const uint8_t *
-perf_event_buffer_end (struct btrace_target_info* tinfo)
+perf_event_buffer_end (const struct perf_event_buffer *pevent)
 {
-  return perf_event_buffer_begin (tinfo) + perf_event_buffer_size (tinfo);
+  return perf_event_buffer_begin (pevent) + perf_event_buffer_size (pevent);
+}
+
+/* Map a perf event buffer.
+   Return zero on success; a negative number otherwise.  */
+
+static int
+perf_event_map_buffer (struct perf_event_buffer *pev, int file, off_t offset)
+{
+  int pg;
+
+  /* We try to allocate as much buffer as we can get.
+     We could allow the user to specify the size of the buffer, but then
+     we'd leave this search for the maximum buffer size to him.  */
+  for (pg = 4; pg >= 0; --pg)
+    {
+      /* The number of pages we request needs to be a power of two.  */
+      pev->size = 1 << pg;
+      pev->mem = mmap (NULL, perf_event_mmap_size (pev),
+		       PROT_READ, MAP_SHARED, file, offset);
+      if (pev->mem != MAP_FAILED)
+	return 0;
+    }
+
+  return -1;
+}
+
+/* Unmap a perf event buffer.
+   Return zero on success; a non-zero errno value otherwise.  */
+
+static int
+perf_event_unmap_buffer (struct perf_event_buffer *pev)
+{
+  int errcode;
+
+  errcode = munmap (pev->mem, perf_event_mmap_size (pev));
+  if (errcode != 0)
+    return errno;
+
+  return 0;
+}
+
+/* Return non-zero if there is new data in PEV; zero otherwise.  */
+
+static int
+perf_event_new_data (const struct perf_event_buffer *pev)
+{
+  volatile struct perf_event_mmap_page *header = perf_event_header (pev);
+  return header->data_head != pev->data_head;
 }
 
 /* Check whether an address is in the kernel.  */
@@ -447,7 +495,7 @@ struct btrace_target_info *
 linux_enable_btrace (ptid_t ptid)
 {
   struct btrace_target_info *tinfo;
-  int pid, pg;
+  int pid, errcode;
 
   tinfo = xzalloc (sizeof (*tinfo));
   tinfo->ptid = ptid;
@@ -475,20 +523,9 @@ linux_enable_btrace (ptid_t ptid)
   if (tinfo->file < 0)
     goto err;
 
-  /* We try to allocate as much buffer as we can get.
-     We could allow the user to specify the size of the buffer, but then
-     we'd leave this search for the maximum buffer size to him.  */
-  for (pg = 4; pg >= 0; --pg)
-    {
-      /* The number of pages we request needs to be a power of two.  */
-      tinfo->size = 1 << pg;
-      tinfo->buffer = mmap (NULL, perf_event_mmap_size (tinfo),
-			    PROT_READ, MAP_SHARED, tinfo->file, 0);
-      if (tinfo->buffer == MAP_FAILED)
-	continue;
-
-      return tinfo;
-    }
+  errcode = perf_event_map_buffer (&tinfo->bts, tinfo->file, 0);
+  if (errcode == 0)
+    return tinfo;
 
   /* We were not able to allocate any buffer.  */
   close (tinfo->file);
@@ -505,8 +542,7 @@ linux_disable_btrace (struct btrace_target_info *tinfo)
 {
   int errcode;
 
-  errno = 0;
-  errcode = munmap (tinfo->buffer, perf_event_mmap_size (tinfo));
+  errcode = perf_event_unmap_buffer (&tinfo->bts);
   if (errcode != 0)
     return BTRACE_ERR_UNKNOWN;
 
@@ -516,16 +552,6 @@ linux_disable_btrace (struct btrace_target_info *tinfo)
   return BTRACE_ERR_NONE;
 }
 
-/* Check whether the branch trace has changed.  */
-
-static int
-linux_btrace_has_changed (struct btrace_target_info *tinfo)
-{
-  volatile struct perf_event_mmap_page *header = perf_event_header (tinfo);
-
-  return header->data_head != tinfo->data_head;
-}
-
 /* Read branch trace data in BTS format for the thread given by TINFO into
    BTRACE using the TYPE reading method.  */
 
@@ -535,18 +561,21 @@ linux_read_bts (struct btrace_data_bts *btrace,
 		enum btrace_read_type type)
 {
   volatile struct perf_event_mmap_page *header;
+  struct perf_event_buffer *pevent;
   const uint8_t *begin, *end, *start;
   unsigned long data_head, data_tail, retries = 5;
   size_t buffer_size, size;
 
+  pevent = &tinfo->bts;
+
   /* For delta reads, we return at least the partial last block containing
      the current PC.  */
-  if (type == BTRACE_READ_NEW && !linux_btrace_has_changed (tinfo))
+  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
     return BTRACE_ERR_NONE;
 
-  header = perf_event_header (tinfo);
-  buffer_size = perf_event_buffer_size (tinfo);
-  data_tail = tinfo->data_head;
+  header = perf_event_header (pevent);
+  buffer_size = perf_event_buffer_size (pevent);
+  data_tail = pevent->data_head;
 
   /* We may need to retry reading the trace.  See below.  */
   while (retries--)
@@ -583,13 +612,13 @@ linux_read_bts (struct btrace_data_bts *btrace,
 	}
 
       /* Data_head keeps growing; the buffer itself is circular.  */
-      begin = perf_event_buffer_begin (tinfo);
+      begin = perf_event_buffer_begin (pevent);
       start = begin + data_head % buffer_size;
 
       if (data_head <= buffer_size)
 	end = start;
       else
-	end = perf_event_buffer_end (tinfo);
+	end = perf_event_buffer_end (pevent);
 
       btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);
 
@@ -602,7 +631,7 @@ linux_read_bts (struct btrace_data_bts *btrace,
 	break;
     }
 
-  tinfo->data_head = data_head;
+  pevent->data_head = data_head;
 
   /* Prune the incomplete last block (i.e. the first one of inferior execution)
      if we're not doing a delta read.  There is no way of filling in its zeroed
diff --git a/gdb/nat/linux-btrace.h b/gdb/nat/linux-btrace.h
index 7b02db5..5af3f87 100644
--- a/gdb/nat/linux-btrace.h
+++ b/gdb/nat/linux-btrace.h
@@ -33,6 +33,22 @@
 #  include <linux/perf_event.h>
 #endif
 
+#if HAVE_LINUX_PERF_EVENT_H
+/* A Linux perf event buffer.  */
+struct perf_event_buffer
+{
+  /* The mapped memory.  */
+  void *mem;
+
+  /* The size of the mapped memory in pages excluding the initial
+     configuration page.  */
+  size_t size;
+
+  /* The data_head value from the last read.  */
+  unsigned long data_head;
+};
+#endif /* HAVE_LINUX_PERF_EVENT_H */
+
 /* Branch trace target information per thread.  */
 struct btrace_target_info
 {
@@ -43,16 +59,11 @@ struct btrace_target_info
   /* The ptid of this thread.  */
   ptid_t ptid;
 
-  /* The mmap configuration mapping the branch trace perf_event buffer.
-
-     file      .. the file descriptor
-     buffer    .. the mmapped memory buffer
-     size      .. the buffer's size in pages without the configuration page
-     data_head .. the data head from the last read  */
+  /* The perf event file.  */
   int file;
-  void *buffer;
-  size_t size;
-  unsigned long data_head;
+
+  /* The BTS perf event buffer.  */
+  struct perf_event_buffer bts;
 #endif /* HAVE_LINUX_PERF_EVENT_H */
 
   /* The size of a pointer in bits for this thread.
-- 
1.8.3.1


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]