This is the mail archive of the gdb-patches@sourceware.org mailing list for the GDB project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[RFC v4 3/9] Add basic Linux kernel support


This patch implements a basic target_ops for Linux kernel support. In
particular it models Linux tasks as GDB threads such that you are able to
change to a given thread, get backtraces, disassemble the current frame
etc..

To simplify matters this patch only supports static targets, i.e. core
dumps.  Support for live targets will be provided in a separate patch.

gdb/ChangeLog:

    * gdbarch.sh (lk_init_private): New hook.
    * gdbarch.h: Regenerated.
    * gdbarch.c: Regenerated.
    * lk-low.h: New file.
    * lk-low.c: New file.
    * lk-lists.h: New file.
    * lk-lists.c: New file.
    * Makefile.in (SFILES, ALLDEPFILES): Add lk-low.c and lk-lists.c.
    (HFILES_NO_SRCDIR): Add lk-low.h and lk-lists.h.
    (ALL_TARGET_OBS): Add lk-low.o and lk-lists.o.
    * configure.tgt (lk_target_obs): New variable with object files for Linux
      kernel support.
      (s390*-*-linux*): Add lk_target_obs.
---
 gdb/Makefile.in   |   8 +
 gdb/configure.tgt |   6 +-
 gdb/gdbarch.c     |  31 ++
 gdb/gdbarch.h     |   7 +
 gdb/gdbarch.sh    |   4 +
 gdb/lk-lists.c    |  47 +++
 gdb/lk-lists.h    |  56 ++++
 gdb/lk-low.c      | 831 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 gdb/lk-low.h      | 310 ++++++++++++++++++++
 9 files changed, 1299 insertions(+), 1 deletion(-)
 create mode 100644 gdb/lk-lists.c
 create mode 100644 gdb/lk-lists.h
 create mode 100644 gdb/lk-low.c
 create mode 100644 gdb/lk-low.h

diff --git a/gdb/Makefile.in b/gdb/Makefile.in
index 5e5fcaae7a..a73ca26a29 100644
--- a/gdb/Makefile.in
+++ b/gdb/Makefile.in
@@ -827,6 +827,8 @@ ALL_TARGET_OBS = \
 	iq2000-tdep.o \
 	linux-record.o \
 	linux-tdep.o \
+	lk-lists.o \
+	lk-low.o \
 	lm32-tdep.o \
 	m32c-tdep.o \
 	m32r-linux-tdep.o \
@@ -1127,6 +1129,8 @@ SFILES = \
 	jit.c \
 	language.c \
 	linespec.c \
+	lk-lists.c \
+	lk-low.c \
 	location.c \
 	m2-exp.y \
 	m2-lang.c \
@@ -1376,6 +1380,8 @@ HFILES_NO_SRCDIR = \
 	linux-nat.h \
 	linux-record.h \
 	linux-tdep.h \
+	lk-lists.h \
+	lk-low.h \
 	location.h \
 	m2-lang.h \
 	m32r-tdep.h \
@@ -2582,6 +2588,8 @@ ALLDEPFILES = \
 	linux-fork.c \
 	linux-record.c \
 	linux-tdep.c \
+	lk-lists.c \
+	lk-low.c \
 	lm32-tdep.c \
 	m32r-linux-nat.c \
 	m32r-linux-tdep.c \
diff --git a/gdb/configure.tgt b/gdb/configure.tgt
index fdcb7b1d69..35db698860 100644
--- a/gdb/configure.tgt
+++ b/gdb/configure.tgt
@@ -36,6 +36,10 @@ case $targ in
     ;;
 esac
 
+# List of object files for Linux kernel support.  To be included into *-linux*
+# targets which support Linux kernel debugging.
+lk_target_obs="lk-lists.o lk-low.o"
+
 # map target info into gdb names.
 
 case "${targ}" in
@@ -480,7 +484,7 @@ powerpc*-*-*)
 s390*-*-linux*)
 	# Target: S390 running Linux
 	gdb_target_obs="s390-linux-tdep.o solib-svr4.o linux-tdep.o \
-			linux-record.o"
+			linux-record.o ${lk_target_obs}"
 	build_gdbserver=yes
 	;;
 
diff --git a/gdb/gdbarch.c b/gdb/gdbarch.c
index e5efdfbb26..cacbc3e740 100644
--- a/gdb/gdbarch.c
+++ b/gdb/gdbarch.c
@@ -351,6 +351,7 @@ struct gdbarch
   gdbarch_addressable_memory_unit_size_ftype *addressable_memory_unit_size;
   char ** disassembler_options;
   const disasm_options_t * valid_disassembler_options;
+  gdbarch_lk_init_private_ftype *lk_init_private;
 };
 
 /* Create a new ``struct gdbarch'' based on information provided by
@@ -1145,6 +1146,12 @@ gdbarch_dump (struct gdbarch *gdbarch, struct ui_file *file)
                       "gdbarch_dump: iterate_over_regset_sections = <%s>\n",
                       host_address_to_string (gdbarch->iterate_over_regset_sections));
   fprintf_unfiltered (file,
+                      "gdbarch_dump: gdbarch_lk_init_private_p() = %d\n",
+                      gdbarch_lk_init_private_p (gdbarch));
+  fprintf_unfiltered (file,
+                      "gdbarch_dump: lk_init_private = <%s>\n",
+                      host_address_to_string (gdbarch->lk_init_private));
+  fprintf_unfiltered (file,
                       "gdbarch_dump: long_bit = %s\n",
                       plongest (gdbarch->long_bit));
   fprintf_unfiltered (file,
@@ -5055,6 +5062,30 @@ set_gdbarch_valid_disassembler_options (struct gdbarch *gdbarch,
   gdbarch->valid_disassembler_options = valid_disassembler_options;
 }
 
+int
+gdbarch_lk_init_private_p (struct gdbarch *gdbarch)
+{
+  gdb_assert (gdbarch != NULL);
+  return gdbarch->lk_init_private != NULL;
+}
+
+void
+gdbarch_lk_init_private (struct gdbarch *gdbarch)
+{
+  gdb_assert (gdbarch != NULL);
+  gdb_assert (gdbarch->lk_init_private != NULL);
+  if (gdbarch_debug >= 2)
+    fprintf_unfiltered (gdb_stdlog, "gdbarch_lk_init_private called\n");
+  gdbarch->lk_init_private (gdbarch);
+}
+
+void
+set_gdbarch_lk_init_private (struct gdbarch *gdbarch,
+                             gdbarch_lk_init_private_ftype lk_init_private)
+{
+  gdbarch->lk_init_private = lk_init_private;
+}
+
 
 /* Keep a registry of per-architecture data-pointers required by GDB
    modules.  */
diff --git a/gdb/gdbarch.h b/gdb/gdbarch.h
index ab7561f851..bfff52ce13 100644
--- a/gdb/gdbarch.h
+++ b/gdb/gdbarch.h
@@ -1553,6 +1553,13 @@ extern void set_gdbarch_disassembler_options (struct gdbarch *gdbarch, char ** d
 
 extern const disasm_options_t * gdbarch_valid_disassembler_options (struct gdbarch *gdbarch);
 extern void set_gdbarch_valid_disassembler_options (struct gdbarch *gdbarch, const disasm_options_t * valid_disassembler_options);
+/* Initialize architecture dependent private data for the linux-kernel target.  */
+
+extern int gdbarch_lk_init_private_p (struct gdbarch *gdbarch);
+
+typedef void (gdbarch_lk_init_private_ftype) (struct gdbarch *gdbarch);
+extern void gdbarch_lk_init_private (struct gdbarch *gdbarch);
+extern void set_gdbarch_lk_init_private (struct gdbarch *gdbarch, gdbarch_lk_init_private_ftype *lk_init_private);
 
 /* Definition for an unknown syscall, used basically in error-cases.  */
 #define UNKNOWN_SYSCALL (-1)
diff --git a/gdb/gdbarch.sh b/gdb/gdbarch.sh
index 22f5715037..8bc9456d3e 100755
--- a/gdb/gdbarch.sh
+++ b/gdb/gdbarch.sh
@@ -1160,6 +1160,10 @@ m;int;addressable_memory_unit_size;void;;;default_addressable_memory_unit_size;;
 v;char **;disassembler_options;;;0;0;;0;pstring_ptr (gdbarch->disassembler_options)
 v;const disasm_options_t *;valid_disassembler_options;;;0;0;;0;host_address_to_string (gdbarch->valid_disassembler_options)
 
+# Initialize architecture dependent private data for the linux-kernel
+# target.
+M;void;lk_init_private;void;
+
 EOF
 }
 
diff --git a/gdb/lk-lists.c b/gdb/lk-lists.c
new file mode 100644
index 0000000000..55d11bd11d
--- /dev/null
+++ b/gdb/lk-lists.c
@@ -0,0 +1,47 @@
+/* Iterators for internal data structures of the Linux kernel.
+
+   Copyright (C) 2016 Free Software Foundation, Inc.
+
+   This file is part of GDB.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
+
+#include "defs.h"
+
+#include "inferior.h"
+#include "lk-lists.h"
+#include "lk-low.h"
+
+/* Returns next entry from struct list_head CURR while iterating field
+   SNAME->FNAME.  */
+
+CORE_ADDR
+lk_list_head_next (CORE_ADDR curr, const char *sname, const char *fname)
+{
+  CORE_ADDR next, next_prev;
+
+  /* We must always assume that the data we handle is corrupted.  Thus use
+     curr->next->prev == curr as sanity check.  Do not read through CURR
+     before it was validated, so a NULL list head is reported as corruption
+     instead of triggering a stray memory read.  */
+  next = next_prev = 0;
+  if (curr)
+    {
+      next = lk_read_addr (curr + LK_OFFSET (list_head, next));
+      next_prev = lk_read_addr (next + LK_OFFSET (list_head, prev));
+    }
+
+  if (!curr || curr != next_prev)
+    {
+      error (_("Memory corruption detected while iterating list_head at "\
+	       "0x%s belonging to list %s->%s."),
+	     phex (curr, lk_builtin_type_size (unsigned_long)), sname, fname);
+    }
+
+  return next;
+}
diff --git a/gdb/lk-lists.h b/gdb/lk-lists.h
new file mode 100644
index 0000000000..f9c2a856e9
--- /dev/null
+++ b/gdb/lk-lists.h
@@ -0,0 +1,56 @@
+/* Iterators for internal data structures of the Linux kernel.
+
+   Copyright (C) 2016 Free Software Foundation, Inc.
+
+   This file is part of GDB.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
+
+#ifndef __LK_LISTS_H__
+#define __LK_LISTS_H__
+
+extern CORE_ADDR lk_list_head_next (CORE_ADDR curr, const char *sname,
+				    const char *fname);
+
+/* Iterator over field SNAME->FNAME of type struct list_head starting at
+   address START of type struct list_head.  This iterator is intended to be
+   used for lists initiated with macro LIST_HEAD (include/linux/list.h) in
+   the kernel, i.e. lists that START is a global variable of type struct
+   list_head and _not_ of type struct SNAME as the rest of the list.  Thus
+   START will not be iterated over but only be used to start/terminate the
+   iteration.  */
+
+#define lk_list_for_each(next, start, sname, fname)		\
+  for ((next) = lk_list_head_next ((start), #sname, #fname);	\
+       (next) != (start);					\
+       (next) = lk_list_head_next ((next), #sname, #fname))
+
+/* Iterator over struct SNAME linked together via field SNAME->FNAME of type
+   struct list_head starting at address START of type struct SNAME.  In
+   contrast to the iterator above, START is a "full" member of the list and
+   thus will be iterated over.  */
+
+/* NOTE(review): _next and _first_loop are declared before the for
+   statement, so this macro cannot appear twice in the same scope and must
+   be wrapped in braces when used as the body of an if/else — confirm all
+   callers obey this.  */
+#define lk_list_for_each_container(cont, start, sname, fname)	\
+  CORE_ADDR _next;						\
+  bool _first_loop = true;					\
+  for ((cont) = (start),					\
+       _next = (start) + LK_OFFSET (sname, fname);		\
+								\
+       (cont) != (start) || _first_loop;			\
+								\
+       _next = lk_list_head_next (_next, #sname, #fname),	\
+       (cont) = LK_CONTAINER_OF (_next, sname, fname),		\
+       _first_loop = false)
+
+#endif /* __LK_LISTS_H__ */
diff --git a/gdb/lk-low.c b/gdb/lk-low.c
new file mode 100644
index 0000000000..e482ad08e7
--- /dev/null
+++ b/gdb/lk-low.c
@@ -0,0 +1,831 @@
+/* Basic Linux kernel support, architecture independent.
+
+   Copyright (C) 2016 Free Software Foundation, Inc.
+
+   This file is part of GDB.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
+
+#include "defs.h"
+
+#include "block.h"
+#include "exceptions.h"
+#include "frame.h"
+#include "gdbarch.h"
+#include "gdbcore.h"
+#include "gdbthread.h"
+#include "gdbtypes.h"
+#include "inferior.h"
+#include "lk-lists.h"
+#include "lk-low.h"
+#include "objfiles.h"
+#include "observer.h"
+#include "solib.h"
+#include "target.h"
+#include "value.h"
+
+#include <algorithm>
+
+struct target_ops *linux_kernel_ops = NULL;
+
+/* Initialize a private data entry for an address, where NAME is the name
+   of the symbol, i.e. variable name in Linux, ALIAS the name used to
+   retrieve the entry from hashtab, and SILENT a flag to determine if
+   errors should be ignored.
+
+   Returns a pointer to the new entry.  In case of an error, either returns
+   NULL (SILENT = TRUE) or throws an error (SILENT = FALSE).  If SILENT = TRUE
+   the caller is responsible to check for errors.
+
+   Do not use directly, use LK_DECLARE_* macros defined in lk-low.h instead.  */
+
+struct lk_private_data *
+lk_init_addr (const char *name, const char *alias, int silent)
+{
+  struct lk_private_data *data;
+  struct bound_minimal_symbol bmsym;
+  void **new_slot;
+  void *old_slot;
+
+  /* Return the cached entry if ALIAS was already initialized.  */
+  if ((old_slot = lk_find (alias)) != NULL)
+    return (struct lk_private_data *) old_slot;
+
+  bmsym = lookup_minimal_symbol (name, NULL, NULL);
+
+  if (bmsym.minsym == NULL)
+    {
+      if (!silent)
+	error (_("Could not find address %s.  Aborting."), alias);
+      return NULL;
+    }
+
+  /* Build a new entry and cache it in the hashtab under ALIAS.  */
+  data = XCNEW (struct lk_private_data);
+  data->alias = alias;
+  data->data.addr = BMSYMBOL_VALUE_ADDRESS (bmsym);
+
+  new_slot = lk_find_slot (alias);
+  *new_slot = data;
+
+  return data;
+}
+
+/* Same as lk_init_addr but for structs.  */
+
+struct lk_private_data *
+lk_init_struct (const char *name, const char *alias, int silent)
+{
+  struct lk_private_data *data;
+  const struct block *global;
+  const struct symbol *sym;
+  struct type *type;
+  void **new_slot;
+  void *old_slot;
+
+  /* Return the cached entry if ALIAS was already initialized.  */
+  if ((old_slot = lk_find (alias)) != NULL)
+    return (struct lk_private_data *) old_slot;
+
+  global = block_global_block(get_selected_block (0));
+  sym = lookup_symbol (name, global, STRUCT_DOMAIN, NULL).symbol;
+
+  if (sym != NULL)
+    {
+      type = SYMBOL_TYPE (sym);
+      goto out;
+    }
+
+  /*  Check for "typedef struct { ... } name;"-like definitions.  */
+  sym = lookup_symbol (name, global, VAR_DOMAIN, NULL).symbol;
+  if (sym == NULL)
+    goto error;
+
+  type = check_typedef (SYMBOL_TYPE (sym));
+
+  if (TYPE_CODE (type) == TYPE_CODE_STRUCT)
+    goto out;
+
+error:
+  /* Neither a struct nor a struct-typedef with that name was found.  */
+  if (!silent)
+    error (_("Could not find %s.  Aborting."), alias);
+
+  return NULL;
+
+out:
+  /* Build a new entry and cache it in the hashtab under ALIAS.  */
+  data = XCNEW (struct lk_private_data);
+  data->alias = alias;
+  data->data.type = type;
+
+  new_slot = lk_find_slot (alias);
+  *new_slot = data;
+
+  return data;
+}
+
+/* Nearly the same as lk_init_addr, with the difference that two names are
+   needed, i.e. the struct name S_NAME containing the field with name
+   F_NAME.  */
+
+struct lk_private_data *
+lk_init_field (const char *s_name, const char *f_name,
+	       const char *s_alias, const char *f_alias,
+	       int silent)
+{
+  struct lk_private_data *data;
+  struct lk_private_data *parent;
+  struct field *first, *last, *field;
+  void **new_slot;
+  void *old_slot;
+
+  /* Return the cached entry if F_ALIAS was already initialized.  */
+  if ((old_slot = lk_find (f_alias)) != NULL)
+    return (struct lk_private_data *) old_slot;
+
+  /* Make sure the enclosing struct is initialized first.  */
+  parent = lk_find (s_alias);
+  if (parent == NULL)
+    {
+      parent = lk_init_struct (s_name, s_alias, silent);
+
+      /* Only SILENT == true needed, as otherwise lk_init_struct would throw
+	 an error.  */
+      if (parent == NULL)
+	return NULL;
+    }
+
+  /* Linear search for the field named F_NAME in the struct's fields.  */
+  first = TYPE_FIELDS (parent->data.type);
+  last = first + TYPE_NFIELDS (parent->data.type);
+  for (field = first; field < last; field ++)
+    {
+      if (streq (field->name, f_name))
+	break;
+    }
+
+  if (field == last)
+    {
+      if (!silent)
+	error (_("Could not find field %s->%s.  Aborting."), s_alias, f_name);
+      return NULL;
+    }
+
+  /* Build a new entry and cache it in the hashtab under F_ALIAS.  */
+  data = XCNEW (struct lk_private_data);
+  data->alias = f_alias;
+  data->data.field = field;
+
+  new_slot = lk_find_slot (f_alias);
+  *new_slot = data;
+
+  return data;
+}
+
+/* Map cpu number CPU to the original PTID from target beneath.  */
+
+static ptid_t
+lk_cpu_to_old_ptid (const int cpu)
+{
+  struct lk_ptid_map *ptid_map;
+
+  /* Walk the linked list built by lk_init_ptid_map.  */
+  for (ptid_map = LK_PRIVATE->old_ptid; ptid_map;
+       ptid_map = ptid_map->next)
+    {
+      if (ptid_map->cpu == cpu)
+	return ptid_map->old_ptid;
+    }
+
+  /* error () does not return, so no return value is needed here.  */
+  error (_("Could not map CPU %d to original PTID.  Aborting."), cpu);
+}
+
+/* Helper functions to read and return basic types at a given ADDRess.  */
+
+/* Read and return the integer value at address ADDR.  */
+
+int
+lk_read_int (CORE_ADDR addr)
+{
+  size_t int_size = lk_builtin_type_size (int);
+  enum bfd_endian endian = gdbarch_byte_order (current_inferior ()->gdbarch);
+  return read_memory_integer (addr, int_size, endian);
+}
+
+/* Read and return the unsigned integer value at address ADDR.  */
+
+unsigned int
+lk_read_uint (CORE_ADDR addr)
+{
+  size_t uint_size = lk_builtin_type_size (unsigned_int);
+  enum bfd_endian endian = gdbarch_byte_order (current_inferior ()->gdbarch);
+  /* Use the unsigned reader so the value is zero- rather than
+     sign-extended, consistent with lk_read_ulong.  */
+  return read_memory_unsigned_integer (addr, uint_size, endian);
+}
+
+/* Read and return the long integer value at address ADDR.  */
+
+LONGEST
+lk_read_long (CORE_ADDR addr)
+{
+  size_t long_size = lk_builtin_type_size (long);
+  enum bfd_endian endian = gdbarch_byte_order (current_inferior ()->gdbarch);
+  return read_memory_integer (addr, long_size, endian);
+}
+
+/* Read and return the unsigned long integer value at address ADDR.  */
+
+ULONGEST
+lk_read_ulong (CORE_ADDR addr)
+{
+  size_t ulong_size = lk_builtin_type_size (unsigned_long);
+  enum bfd_endian endian = gdbarch_byte_order (current_inferior ()->gdbarch);
+  return read_memory_unsigned_integer (addr, ulong_size, endian);
+}
+
+/* Read and return the address value at address ADDR.  */
+
+CORE_ADDR
+lk_read_addr (CORE_ADDR addr)
+{
+  return (CORE_ADDR) lk_read_ulong (addr);
+}
+
+/* Reads a bitmap at a given ADDRess of size SIZE (in bits). Allocates and
+   returns an array of ulongs.  The caller is responsible to free the array
+   after it is no longer needed.  */
+
+ULONGEST *
+lk_read_bitmap (CORE_ADDR addr, size_t size)
+{
+  ULONGEST *bitmap;
+  size_t ulong_size, len;
+
+  ulong_size = lk_builtin_type_size (unsigned_long);
+  len = LK_DIV_ROUND_UP (size, ulong_size * LK_BITS_PER_BYTE);
+  bitmap = XNEWVEC (ULONGEST, len);
+
+  for (size_t i = 0; i < len; i++)
+    bitmap[i] = lk_read_ulong (addr + i * ulong_size);
+
+  return bitmap;
+}
+
+/* Return the next set bit in bitmap BITMAP of size SIZE (in bits)
+   starting from bit (index) BIT.  Return SIZE when the end of the bitmap
+   was reached.  To iterate over all set bits use macro
+   LK_BITMAP_FOR_EACH_SET_BIT defined in lk-low.h.  */
+
+size_t
+lk_bitmap_find_next_bit (ULONGEST *bitmap, size_t size, size_t bit)
+{
+  size_t ulong_size, bits_per_ulong, elt;
+
+  ulong_size = lk_builtin_type_size (unsigned_long);
+  bits_per_ulong = ulong_size * LK_BITS_PER_BYTE;
+  elt = bit / bits_per_ulong;
+
+  while (bit < size)
+    {
+      /* The kernel stores bitmaps in lsb0 order, i.e. bit 0 is the least
+	 significant bit of the first ulong.  Shift in ULONGEST width; a
+	 plain 1UL is undefined for shift counts >= 32 when the host's long
+	 is 32 bit but the target's unsigned long is 64 bit.  */
+      if (bitmap[elt] & ((ULONGEST) 1 << (bit % bits_per_ulong)))
+	return bit;
+
+      bit++;
+      if (bit % bits_per_ulong == 0)
+	elt++;
+    }
+
+  return size;
+}
+
+/* Returns the Hamming weight, i.e. number of set bits, of bitmap BITMAP
+   with size SIZE (in bits).  */
+
+size_t
+lk_bitmap_hweight (ULONGEST *bitmap, size_t size)
+{
+  size_t ulong_size, bit, bits_per_ulong, elt, retval;
+
+  ulong_size = lk_builtin_type_size (unsigned_long);
+  bits_per_ulong = ulong_size * LK_BITS_PER_BYTE;
+  elt = bit = 0;
+  retval = 0;
+
+  while (bit < size)
+    {
+      /* Shift in ULONGEST width: the former (1 << ...) shifted a plain int,
+	 which is undefined behavior for bit indices >= 31 and dropped all
+	 set bits in the upper half of 64-bit ulongs.  */
+      if (bitmap[elt] & ((ULONGEST) 1 << (bit % bits_per_ulong)))
+	retval++;
+
+      bit++;
+      if (bit % bits_per_ulong == 0)
+	elt++;
+    }
+
+  return retval;
+}
+
+/* Provide the per_cpu_offset of cpu CPU.  See comment in lk-low.h for
+   details.  */
+
+CORE_ADDR
+lk_get_percpu_offset (unsigned int cpu)
+{
+  size_t ulong_size = lk_builtin_type_size (unsigned_long);
+  CORE_ADDR percpu_elt;
+
+  /* Give the architecture a chance to overwrite default behaviour.  */
+  if (LK_HOOK->get_percpu_offset)
+    return LK_HOOK->get_percpu_offset (cpu);
+
+  /* Default: __per_cpu_offset is an array of unsigned long, indexed by
+     cpu number.  */
+  percpu_elt = LK_ADDR (__per_cpu_offset) + (ulong_size * cpu);
+  return lk_read_addr (percpu_elt);
+}
+
+
+/* Test if a given task TASK is running.  See comment in lk-low.h for
+   details.  */
+
+unsigned int
+lk_task_running (CORE_ADDR task)
+{
+  ULONGEST *cpu_online_mask;
+  size_t size;
+  unsigned int cpu;
+  struct cleanup *old_chain;
+
+  size = LK_BITMAP_SIZE (cpumask);
+  cpu_online_mask = lk_read_bitmap (LK_ADDR (cpu_online_mask), size);
+  old_chain = make_cleanup (xfree, cpu_online_mask);
+
+  /* TASK is running iff it is the current task (rq->curr) of some online
+     cpu's runqueue.  */
+  LK_BITMAP_FOR_EACH_SET_BIT (cpu_online_mask, size, cpu)
+    {
+      CORE_ADDR rq;
+      CORE_ADDR curr;
+
+      rq = LK_ADDR (runqueues) + lk_get_percpu_offset (cpu);
+      curr = lk_read_addr (rq + LK_OFFSET (rq, curr));
+
+      if (curr == task)
+	break;
+    }
+
+  /* The iterator leaves CPU == SIZE when no cpu matched.  */
+  if (cpu == size)
+    cpu = LK_CPU_INVAL;
+
+  do_cleanups (old_chain);
+  return cpu;
+}
+
+/* Update running tasks with information from struct rq->curr. */
+
+static void
+lk_update_running_tasks ()
+{
+  ULONGEST *cpu_online_mask;
+  size_t size;
+  unsigned int cpu;
+  struct cleanup *old_chain;
+
+  size = LK_BITMAP_SIZE (cpumask);
+  cpu_online_mask = lk_read_bitmap (LK_ADDR (cpu_online_mask), size);
+  old_chain = make_cleanup (xfree, cpu_online_mask);
+
+  /* For every online cpu, remap the thread reported by the target beneath
+     (one per cpu) to the PTID derived from that cpu's rq->curr task.  */
+  LK_BITMAP_FOR_EACH_SET_BIT (cpu_online_mask, size, cpu)
+    {
+      struct thread_info *tp;
+      CORE_ADDR rq, curr;
+      LONGEST pid, inf_pid;
+
+      rq = LK_ADDR (runqueues) + lk_get_percpu_offset (cpu);
+      curr = lk_read_addr (rq + LK_OFFSET (rq, curr));
+      pid = lk_read_int (curr + LK_OFFSET (task_struct, pid));
+      inf_pid = current_inferior ()->pid;
+
+      /* PTID convention: pid = inferior, lwp = task pid, tid = task_struct
+	 address (see lk-low.h).  */
+      ptid_t new_ptid (inf_pid, pid, curr);
+      ptid_t old_ptid = lk_cpu_to_old_ptid (cpu); /* FIXME not suitable for
+						     running targets? */
+
+      tp = find_thread_ptid (old_ptid);
+      if (tp && tp->state != THREAD_EXITED)
+	thread_change_ptid (old_ptid, new_ptid);
+    }
+  do_cleanups (old_chain);
+}
+
+/* Update sleeping tasks by walking the task_structs starting from
+   init_task.  */
+
+static void
+lk_update_sleeping_tasks ()
+{
+  CORE_ADDR init_task, task, thread;
+  int inf_pid;
+
+  inf_pid = current_inferior ()->pid;
+  init_task = LK_ADDR (init_task);
+
+  /* Outer loop: all processes linked via task_struct->tasks; inner loop:
+     all threads of a process linked via task_struct->thread_group.  */
+  lk_list_for_each_container (task, init_task, task_struct, tasks)
+    {
+      lk_list_for_each_container (thread, task, task_struct, thread_group)
+	{
+	  int pid;
+	  struct thread_info *tp;
+
+	  pid = lk_read_int (thread + LK_OFFSET (task_struct, pid));
+	  ptid_t ptid (inf_pid, pid, thread);
+
+	  /* Add only tasks not already known (running tasks were remapped
+	     by lk_update_running_tasks beforehand).  */
+	  tp = find_thread_ptid (ptid);
+	  if (tp == NULL || tp->state == THREAD_EXITED)
+	    add_thread (ptid);
+	}
+    }
+}
+
+/* Function for targets to_update_thread_list hook.  */
+
+static void
+lk_update_thread_list (struct target_ops *target)
+{
+  prune_threads ();
+  /* Order matters: running tasks must be remapped to their new PTIDs
+     before sleeping tasks are added.  */
+  lk_update_running_tasks ();
+  lk_update_sleeping_tasks ();
+}
+
+/* Function for targets to_fetch_registers hook.  */
+
+static void
+lk_fetch_registers (struct target_ops *target,
+		    struct regcache *regcache, int regnum)
+{
+  CORE_ADDR task;
+  unsigned int cpu;
+
+  /* By the PTID convention of this target the tid field holds the
+     task_struct address (see lk-low.h).  */
+  task = (CORE_ADDR) regcache_get_ptid (regcache).tid ();
+  cpu = lk_task_running (task);
+
+  /* Let the target beneath fetch registers of running tasks.  */
+  if (cpu != LK_CPU_INVAL)
+    {
+      struct cleanup *old_inferior_ptid;
+
+      /* Temporarily switch inferior_ptid to the original PTID the target
+	 beneath knows for this cpu.  */
+      old_inferior_ptid = save_inferior_ptid ();
+      inferior_ptid = lk_cpu_to_old_ptid (cpu);
+      linux_kernel_ops->beneath->to_fetch_registers (target, regcache, regnum);
+      do_cleanups (old_inferior_ptid);
+    }
+  else
+    {
+      struct gdbarch *gdbarch;
+      unsigned int i;
+
+      /* Sleeping tasks: registers must be reconstructed from the saved
+	 task state by the architecture hook.  */
+      LK_HOOK->get_registers (task, target, regcache, regnum);
+
+      /* Mark all registers not found as unavailable.  */
+      gdbarch = get_regcache_arch (regcache);
+      for (i = 0; i < gdbarch_num_regs (gdbarch); i++)
+	{
+	  if (regcache_register_status (regcache, i) == REG_UNKNOWN)
+	    regcache_raw_supply (regcache, i, NULL);
+	}
+    }
+}
+
+/* Function for targets to_pid_to_str hook.  Marks running tasks with an
+   asterisk "*".  */
+
+static const char *
+lk_pid_to_str (struct target_ops *target, ptid_t ptid)
+{
+  /* Returned string lives in a static buffer; it is only valid until the
+     next call and the function is not reentrant.  */
+  static char buf[64];
+  long pid;
+  CORE_ADDR task;
+
+  pid = ptid.lwp ();
+  task = (CORE_ADDR) ptid.tid ();
+
+  xsnprintf (buf, sizeof (buf), "PID: %5li%s, 0x%s",
+	     pid, ((lk_task_running (task) != LK_CPU_INVAL) ? "*" : ""),
+	     phex (task, lk_builtin_type_size (unsigned_long)));
+
+  return buf;
+}
+
+/* Function for targets to_thread_name hook.  */
+
+static const char *
+lk_thread_name (struct target_ops *target, struct thread_info *ti)
+{
+  /* Returned string lives in a static buffer; it is only valid until the
+     next call.  */
+  static char buf[LK_TASK_COMM_LEN + 1];
+  char tmp[LK_TASK_COMM_LEN + 1];
+  CORE_ADDR task, comm;
+  size_t size;
+
+  size = std::min ((unsigned int) LK_TASK_COMM_LEN,
+		   LK_ARRAY_LEN(LK_FIELD (task_struct, comm)));
+
+  task = (CORE_ADDR) ti->ptid.tid ();
+  comm = task + LK_OFFSET (task_struct, comm);
+  read_memory (comm, (gdb_byte *) tmp, size);
+
+  /* A sane kernel NUL-terminates task_struct->comm, but do not rely on it
+     when reading from a possibly corrupted dump; SIZE <= LK_TASK_COMM_LEN,
+     so the terminator always fits into TMP.  */
+  tmp[size] = '\0';
+
+  xsnprintf (buf, sizeof (buf), "%-16s", tmp);
+
+  return buf;
+}
+
+/* Functions to initialize and free target_ops and its private data.  As well
+   as functions for targets to_open/close/detach hooks.  */
+
+/* Check if OBJFILE is a Linux kernel.  */
+
+static int
+lk_is_linux_kernel (struct objfile *objfile)
+{
+  int ok = 0;
+
+  if (objfile == NULL || !(objfile->flags & OBJF_MAINLINE))
+    return 0;
+
+  /* Require all three well-known kernel symbols (ok > 2 means all of the
+     three lookups below succeeded).  */
+  ok += lookup_minimal_symbol ("linux_banner", NULL, objfile).minsym != NULL;
+  ok += lookup_minimal_symbol ("_stext", NULL, objfile).minsym != NULL;
+  ok += lookup_minimal_symbol ("_etext", NULL, objfile).minsym != NULL;
+
+  return (ok > 2);
+}
+
+/* Initialize struct lk_private.  */
+
+static void
+lk_init_private ()
+{
+  linux_kernel_ops->to_data = XCNEW (struct lk_private);
+  LK_PRIVATE->hooks = XCNEW (struct lk_private_hooks);
+  LK_PRIVATE->data = htab_create_alloc (31, (htab_hash) lk_hash_private_data,
+					(htab_eq) lk_private_data_eq, NULL,
+					xcalloc, xfree);
+}
+
+/* Initialize architecture independent private data.  Must be called
+   _after_ symbol tables were initialized.  */
+
+static void
+lk_init_private_data ()
+{
+  if (LK_PRIVATE->data != NULL)
+    htab_empty (LK_PRIVATE->data);
+
+  LK_DECLARE_FIELD (task_struct, tasks);
+  LK_DECLARE_FIELD (task_struct, pid);
+  LK_DECLARE_FIELD (task_struct, tgid);
+  LK_DECLARE_FIELD (task_struct, thread_group);
+  LK_DECLARE_FIELD (task_struct, comm);
+  LK_DECLARE_FIELD (task_struct, thread);
+
+  LK_DECLARE_FIELD (list_head, next);
+  LK_DECLARE_FIELD (list_head, prev);
+
+  LK_DECLARE_FIELD (rq, curr);
+
+  LK_DECLARE_FIELD (cpumask, bits);
+
+  LK_DECLARE_ADDR (init_task);
+  LK_DECLARE_ADDR (runqueues);
+  LK_DECLARE_ADDR (__per_cpu_offset);
+  LK_DECLARE_ADDR (init_mm);
+
+  LK_DECLARE_ADDR_ALIAS (__cpu_online_mask, cpu_online_mask);	/* linux 4.5+ */
+  LK_DECLARE_ADDR_ALIAS (cpu_online_bits, cpu_online_mask);	/* linux -4.4 */
+  if (LK_ADDR (cpu_online_mask) == -1)
+    error (_("Could not find address cpu_online_mask.  Aborting."));
+}
+
+/* Frees the cpu to old ptid map.  */
+
+static void
+lk_free_ptid_map ()
+{
+  while (LK_PRIVATE->old_ptid)
+    {
+      struct lk_ptid_map *tmp;
+
+      tmp = LK_PRIVATE->old_ptid;
+      LK_PRIVATE->old_ptid = tmp->next;
+      XDELETE (tmp);
+    }
+}
+
+/* Initialize the cpu to old ptid map.  Prefer the arch dependent
+   map_running_task_to_cpu hook if provided, else assume that the PID used
+   by target beneath is the same as in task_struct PID task_struct.  See
+   comment on lk_ptid_map in lk-low.h for details.  */
+
+static void
+lk_init_ptid_map ()
+{
+  struct thread_info *ti;
+  ULONGEST *cpu_online_mask;
+  size_t size;
+  unsigned int cpu;
+  struct cleanup *old_chain;
+
+  /* Rebuild the map from scratch.  */
+  if (LK_PRIVATE->old_ptid != NULL)
+    lk_free_ptid_map ();
+
+  size = LK_BITMAP_SIZE (cpumask);
+  cpu_online_mask = lk_read_bitmap (LK_ADDR (cpu_online_mask), size);
+  old_chain = make_cleanup (xfree, cpu_online_mask);
+
+  ALL_THREADS (ti)
+    {
+      struct lk_ptid_map *ptid_map = XCNEW (struct lk_ptid_map);
+      CORE_ADDR rq, curr;
+      int pid;
+
+      /* Give the architecture a chance to overwrite default behaviour.  */
+      if (LK_HOOK->map_running_task_to_cpu)
+	{
+	  ptid_map->cpu = LK_HOOK->map_running_task_to_cpu (ti);
+	}
+      else
+	{
+	  /* Default: match the thread's lwp against the pid of each online
+	     cpu's current task (rq->curr).  */
+	  LK_BITMAP_FOR_EACH_SET_BIT (cpu_online_mask, size, cpu)
+	    {
+	      rq = LK_ADDR (runqueues) + lk_get_percpu_offset (cpu);
+	      curr = lk_read_addr (rq + LK_OFFSET (rq, curr));
+	      pid = lk_read_int (curr + LK_OFFSET (task_struct, pid));
+
+	      if (pid == ti->ptid.lwp ())
+		{
+		  ptid_map->cpu = cpu;
+		  break;
+		}
+	    }
+	  /* The iterator leaves CPU == SIZE when no cpu matched.  */
+	  if (cpu == size)
+	    error (_("Could not map thread with pid %d, lwp %lu to a cpu."),
+		   ti->ptid.pid (), ti->ptid.lwp ());
+	}
+      /* Prepend the new entry to the singly linked list.  */
+      ptid_map->old_ptid = ti->ptid;
+      ptid_map->next = LK_PRIVATE->old_ptid;
+      LK_PRIVATE->old_ptid = ptid_map;
+    }
+
+  do_cleanups (old_chain);
+}
+
+/* Initializes all private data and pushes the linux kernel target, if not
+   already done.  */
+
+static void
+lk_try_push_target ()
+{
+  struct gdbarch *gdbarch;
+
+  gdbarch = current_inferior ()->gdbarch;
+  if (!(gdbarch && gdbarch_lk_init_private_p (gdbarch)))
+    error (_("Linux kernel debugging not supported on %s."),
+	   gdbarch_bfd_arch_info (gdbarch)->printable_name);
+
+  lk_init_private ();
+  lk_init_private_data ();
+  gdbarch_lk_init_private (gdbarch);
+  /* Check for required arch hooks.  */
+  gdb_assert (LK_HOOK->get_registers);
+
+  lk_init_ptid_map ();
+  lk_update_thread_list (linux_kernel_ops);
+
+  if (!target_is_pushed (linux_kernel_ops))
+    push_target (linux_kernel_ops);
+}
+
+/* Function for targets to_open hook.  */
+
+static void
+lk_open (const char *args, int from_tty)
+{
+  struct objfile *objfile;
+
+  if (target_is_pushed (linux_kernel_ops))
+    {
+      printf_unfiltered (_("Linux kernel target already pushed.  Aborting\n"));
+      return;
+    }
+
+  for (objfile = current_program_space->objfiles; objfile;
+       objfile = objfile->next)
+    {
+      if (lk_is_linux_kernel (objfile)
+	  && inferior_ptid.pid () != 0)
+	{
+	  lk_try_push_target ();
+	  return;
+	}
+    }
+  printf_unfiltered (_("Could not find a valid Linux kernel object file.  "
+		       "Aborting.\n"));
+}
+
+/* Function for targets to_close hook.  Deletes all private data.  */
+
+static void
+lk_close (struct target_ops *ops)
+{
+  /* Free all private data owned by this target; assumes lk_init_private
+     ran before, i.e. LK_PRIVATE is non-NULL.  */
+  htab_delete (LK_PRIVATE->data);
+  lk_free_ptid_map ();
+  XDELETE (LK_PRIVATE->hooks);
+
+  XDELETE (LK_PRIVATE);
+  linux_kernel_ops->to_data = NULL;
+}
+
+/* Function for targets to_detach hook.  */
+
+static void
+lk_detach (struct target_ops *t, const char *args, int from_tty)
+{
+  struct target_ops *beneath = linux_kernel_ops->beneath;
+
+  unpush_target (linux_kernel_ops);
+  reinit_frame_cache ();
+  if (from_tty)
+    printf_filtered (_("Linux kernel target detached.\n"));
+
+  beneath->to_detach (beneath, args, from_tty);
+}
+
+/* Function for new objfile observer.  */
+
+static void
+lk_observer_new_objfile (struct objfile *objfile)
+{
+  if (lk_is_linux_kernel (objfile)
+      && inferior_ptid.pid () != 0)
+    lk_try_push_target ();
+}
+
+/* Function for inferior created observer.  */
+
+static void
+lk_observer_inferior_created (struct target_ops *ops, int from_tty)
+{
+  struct objfile *objfile;
+
+  if (inferior_ptid.pid () == 0)
+    return;
+
+  for (objfile = current_inferior ()->pspace->objfiles; objfile;
+       objfile = objfile->next)
+    {
+      if (lk_is_linux_kernel (objfile))
+	{
+	  lk_try_push_target ();
+	  return;
+	}
+    }
+}
+
+/* Initialize linux kernel target.  */
+
+static void
+init_linux_kernel_ops (void)
+{
+  struct target_ops *t;
+
+  if (linux_kernel_ops != NULL)
+    return;
+
+  t = XCNEW (struct target_ops);
+  t->to_shortname = "linux-kernel";
+  t->to_longname = "linux kernel support";
+  t->to_doc = "Adds support to debug the Linux kernel";
+
+  /* set t->to_data = struct lk_private in lk_init_private.  */
+
+  t->to_open = lk_open;
+  t->to_close = lk_close;
+  t->to_detach = lk_detach;
+  t->to_fetch_registers = lk_fetch_registers;
+  t->to_update_thread_list = lk_update_thread_list;
+  t->to_pid_to_str = lk_pid_to_str;
+  t->to_thread_name = lk_thread_name;
+
+  t->to_stratum = thread_stratum;
+  t->to_magic = OPS_MAGIC;
+
+  linux_kernel_ops = t;
+
+  add_target (t);
+}
+
+/* Provide a prototype to silence -Wmissing-prototypes.  */
+extern initialize_file_ftype _initialize_linux_kernel;
+
+void
+_initialize_linux_kernel (void)
+{
+  init_linux_kernel_ops ();
+
+  observer_attach_new_objfile (lk_observer_new_objfile);
+  observer_attach_inferior_created (lk_observer_inferior_created);
+}
diff --git a/gdb/lk-low.h b/gdb/lk-low.h
new file mode 100644
index 0000000000..be8c5556df
--- /dev/null
+++ b/gdb/lk-low.h
@@ -0,0 +1,310 @@
+/* Basic Linux kernel support, architecture independent.
+
+   Copyright (C) 2016 Free Software Foundation, Inc.
+
+   This file is part of GDB.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
+
+#ifndef __LK_LOW_H__
+#define __LK_LOW_H__
+
+#include "target.h"
+
+extern struct target_ops *linux_kernel_ops;
+
+/* Copy constants defined in Linux kernel.  */
+#define LK_TASK_COMM_LEN 16
+#define LK_BITS_PER_BYTE 8
+
+/* Definitions used in linux kernel target.  */
+/* Sentinel "no such CPU" value (see lk_task_running).  Parenthesized so
+   the unary minus cannot combine unexpectedly with surrounding tokens
+   when the macro is expanded inside a larger expression.  */
+#define LK_CPU_INVAL (-1U)
+
+/* Private data structs for this target.  */
+/* Forward declarations.  */
+struct lk_private_hooks;
+struct lk_ptid_map;
+
+/* Short hand access to private data.  */
+#define LK_PRIVATE ((struct lk_private *) linux_kernel_ops->to_data)
+#define LK_HOOK (LK_PRIVATE->hooks)
+
+/* Private data of the linux-kernel target, stored in its to_data field
+   and accessed through the LK_PRIVATE macro above.  */
+struct lk_private
+{
+  /* Hashtab for needed addresses, structs and fields.  Entries are
+     struct lk_private_data, keyed by their alias string.  */
+  htab_t data;
+
+  /* Linked list to map between cpu number and original ptid from target
+     beneath.  */
+  struct lk_ptid_map *old_ptid;
+
+  /* Hooks for architecture dependent functions.  */
+  struct lk_private_hooks *hooks;
+};
+
+/* We use the following convention for PTIDs:
+
+   ptid->pid = inferiors PID
+   ptid->lwp = PID from task_struct
+   ptid->tid = address of task_struct
+
+   Using the task_struct's address as the TID has two reasons.  First, we
+   need it quite often and there is no other reasonable way to pass it down.
+   Second, it helps us to distinguish swapper tasks as they all have PID = 0.
+
+   Furthermore we cannot rely on the target beneath to use the same PID as the
+   task_struct. Thus we need a mapping between our PTID and the PTID of the
+   target beneath. Otherwise it is impossible to pass jobs, e.g. fetching
+   registers of running tasks, to the target beneath.  */
+
+/* Private data struct to map between our and the target beneath PTID.
+   Stored as a singly linked list headed at lk_private->old_ptid.  */
+
+struct lk_ptid_map
+{
+  struct lk_ptid_map *next;	/* Next entry in the list.  */
+  unsigned int cpu;		/* Logical CPU number.  */
+  ptid_t old_ptid;		/* PTID used by the target beneath.  */
+};
+
+/* Private data struct to be stored in hashtab.  Keyed by ALIAS; DATA
+   holds an address, a struct type, or a field depending on which of the
+   lk_init_* routines created the entry.  */
+
+struct lk_private_data
+{
+  /* Hash key; also the equality criterion (see lk_private_data_eq).  */
+  const char *alias;
+
+  /* Payload.  The valid member depends on the entry's kind; the accessor
+     macros LK_ADDR/LK_STRUCT/LK_FIELD below select the right one.  */
+  union
+  {
+    CORE_ADDR addr;
+    struct type *type;
+    struct field *field;
+  } data;
+};
+
+/* Wrapper for htab_hash_string to work with our private data.  Entries
+   are hashed by their alias string.  */
+
+static inline hashval_t
+lk_hash_private_data (const struct lk_private_data *entry)
+{
+  const char *key = entry->alias;
+
+  return htab_hash_string (key);
+}
+
+/* Function for htab_eq to work with our private data.  Two entries are
+   considered equal when their alias strings match.  */
+
+static inline int
+lk_private_data_eq (const struct lk_private_data *entry,
+		    const struct lk_private_data *element)
+{
+  const char *lhs = entry->alias;
+  const char *rhs = element->alias;
+
+  return streq (lhs, rhs);
+}
+
+/* Wrapper for htab_find_slot to work with our private data.  Do not use
+   directly, use the macros below instead.  */
+
+static inline void **
+lk_find_slot (const char *alias)
+{
+  struct lk_private_data dummy = {};
+
+  /* Only the alias key matters for hashing and comparison.  */
+  dummy.alias = alias;
+  return htab_find_slot (LK_PRIVATE->data, &dummy, INSERT);
+}
+
+/* Wrapper for htab_find to work with our private data.  Do not use
+   directly, use the macros below instead.  */
+
+static inline struct lk_private_data *
+lk_find (const char *alias)
+{
+  struct lk_private_data dummy = {};
+  void *entry;
+
+  /* Only the alias key matters for hashing and comparison.  */
+  dummy.alias = alias;
+  entry = htab_find (LK_PRIVATE->data, &dummy);
+
+  return (struct lk_private_data *) entry;
+}
+
+/* Functions to initialize private data.  Do not use directly, use the
+   macros below instead.  */
+
+extern struct lk_private_data *lk_init_addr (const char *name,
+					     const char *alias, int silent);
+extern struct lk_private_data *lk_init_struct (const char *name,
+					       const char *alias, int silent);
+extern struct lk_private_data *lk_init_field (const char *s_name,
+					      const char *f_name,
+					      const char *s_alias,
+					      const char *f_alias, int silent);
+
+/* The names we use to store our private data in the hashtab.  */
+
+#define LK_STRUCT_ALIAS(s_name) ("struct " #s_name)
+#define LK_FIELD_ALIAS(s_name, f_name) (#s_name " " #f_name)
+
+/* Macros to initialize addresses and fields, where (S_/F_)NAME is the
+   variable's name as used in Linux.  LK_DECLARE_FIELD also initializes the
+   corresponding struct entry.  Throws an error if no symbol with the given
+   name is found.  */
+
+#define LK_DECLARE_ADDR(name) \
+  lk_init_addr (#name, #name, 0)
+#define LK_DECLARE_FIELD(s_name, f_name) \
+  lk_init_field (#s_name, #f_name, LK_STRUCT_ALIAS (s_name), \
+		 LK_FIELD_ALIAS (s_name, f_name), 0)
+
+/* Same as LK_DECLARE_*, but returns NULL instead of throwing an error if no
+   symbol was found.  The caller is responsible for checking for possible
+   errors.  */
+
+#define LK_DECLARE_ADDR_SILENT(name) \
+  lk_init_addr (#name, #name, 1)
+#define LK_DECLARE_FIELD_SILENT(s_name, f_name) \
+  lk_init_field (#s_name, #f_name, LK_STRUCT_ALIAS (s_name), \
+		 LK_FIELD_ALIAS (s_name, f_name), 1)
+
+/* Same as LK_DECLARE_*_SILENT, but allows you to give an ALIAS name.  If used
+   for a struct, the struct has to be declared explicitly _before_ any of its
+   fields.  They are meant to be used when a variable in the kernel was simply
+   renamed (at least from our point of view).  The caller is responsible for
+   checking for possible errors.  */
+
+#define LK_DECLARE_ADDR_ALIAS(name, alias) \
+  lk_init_addr (#name, #alias, 1)
+#define LK_DECLARE_STRUCT_ALIAS(s_name, alias) \
+  lk_init_struct (#s_name, LK_STRUCT_ALIAS (alias), 1)
+#define LK_DECLARE_FIELD_ALIAS(s_alias, f_name, f_alias) \
+  lk_init_field (NULL, #f_name, LK_STRUCT_ALIAS (s_alias), \
+		 LK_FIELD_ALIAS (s_alias, f_alias), 1)
+
+/* Macros to retrieve private data from hashtab. Returns NULL (-1) if no entry
+   with the given ALIAS exists. The caller only needs to check for possible
+   errors if not done so at initialization.  */
+
+#define LK_ADDR(alias) \
+  (lk_find (#alias) ? (lk_find (#alias))->data.addr : -1)
+#define LK_STRUCT(alias) \
+  (lk_find (LK_STRUCT_ALIAS (alias)) \
+   ? (lk_find (LK_STRUCT_ALIAS (alias)))->data.type \
+   : NULL)
+#define LK_FIELD(s_alias, f_alias) \
+  (lk_find (LK_FIELD_ALIAS (s_alias, f_alias)) \
+   ? (lk_find (LK_FIELD_ALIAS (s_alias, f_alias)))->data.field \
+   : NULL)
+
+
+/* Definitions for architecture dependent hooks.  */
+/* Hook to read registers from the target and supply their content
+   to the regcache.  */
+typedef void (*lk_hook_get_registers) (CORE_ADDR task,
+				       struct target_ops *target,
+				       struct regcache *regcache,
+				       int regnum);
+
+/* Hook to return the per_cpu_offset of cpu CPU.  Only architectures that
+   do not use the __per_cpu_offset array to determine the offset have to
+   supply this hook.  */
+typedef CORE_ADDR (*lk_hook_get_percpu_offset) (unsigned int cpu);
+
+/* Hook to map a running task to a logical CPU.  Required if the target
+   beneath uses a different PID as struct rq.  */
+typedef unsigned int (*lk_hook_map_running_task_to_cpu) (struct thread_info *ti);
+
+/* Collection of architecture dependent hook functions (see the typedefs
+   above), stored in lk_private->hooks.  */
+struct lk_private_hooks
+{
+  /* required */
+  lk_hook_get_registers get_registers;
+
+  /* optional, required if __per_cpu_offset array is not used to determine
+     offset.  */
+  lk_hook_get_percpu_offset get_percpu_offset;
+
+  /* optional, required if the target beneath uses a different PID as struct
+     rq.  */
+  lk_hook_map_running_task_to_cpu map_running_task_to_cpu;
+};
+
+/* Helper functions to read and return a value at a given ADDRess.  */
+extern int lk_read_int (CORE_ADDR addr);
+extern unsigned int lk_read_uint (CORE_ADDR addr);
+extern LONGEST lk_read_long (CORE_ADDR addr);
+extern ULONGEST lk_read_ulong (CORE_ADDR addr);
+extern CORE_ADDR lk_read_addr (CORE_ADDR addr);
+
+/* Reads a bitmap at a given ADDRess of size SIZE (in bits). Allocates and
+   returns an array of ulongs.  The caller is responsible to free the array
+   after it is no longer needed.  */
+extern ULONGEST *lk_read_bitmap (CORE_ADDR addr, size_t size);
+
+/* Walks the bitmap BITMAP of size SIZE (in bits) starting from bit
+   (index) BIT.  Returns the index of the next set bit or SIZE, when the
+   end of the bitmap was reached.  To iterate over all set bits use macro
+   LK_BITMAP_FOR_EACH_SET_BIT defined below.
+   NOTE(review): declared parameter order changed to (bitmap, size, bit)
+   -- the previous prototype said (bitmap, bit, size), which contradicted
+   both this comment and the macro below, which pass SIZE second and the
+   start bit third.  The definition in lk-low.c must use this order.  */
+extern size_t lk_bitmap_find_next_bit (ULONGEST *bitmap, size_t size,
+				       size_t bit);
+
+/* Iterate over all set bits of BITMAP (SIZE in bits), with BIT taking
+   the index of each set bit in turn.  */
+#define LK_BITMAP_FOR_EACH_SET_BIT(bitmap, size, bit)			\
+  for ((bit) = lk_bitmap_find_next_bit ((bitmap), (size), 0);		\
+       (bit) < (size);							\
+       (bit) = lk_bitmap_find_next_bit ((bitmap), (size), (bit) + 1))
+
+/* Returns the size of BITMAP in bits.  */
+#define LK_BITMAP_SIZE(bitmap) \
+  (FIELD_SIZE (LK_FIELD (bitmap, bits)) * LK_BITS_PER_BYTE)
+
+/* Returns the Hamming weight, i.e. number of set bits, of bitmap BITMAP with
+   size SIZE (in bits).  */
+extern size_t lk_bitmap_hweight (ULONGEST *bitmap, size_t size);
+
+
+/* Short hand access to current gdbarchs builtin types and their
+   size (in byte).  For TYPE replace spaces " " by underscore "_", e.g.
+   "unsigned int" => "unsigned_int".  */
+#define lk_builtin_type(type)					\
+  (builtin_type (current_inferior ()->gdbarch)->builtin_##type)
+#define lk_builtin_type_size(type)		\
+  (lk_builtin_type (type)->length)
+
+/* If field FIELD is an array returns its length (in #elements).  */
+#define LK_ARRAY_LEN(field)			\
+  (FIELD_SIZE (field) / FIELD_TARGET_SIZE (field))
+
+/* Short hand access to the offset of field F_NAME in struct S_NAME.  */
+#define LK_OFFSET(s_name, f_name)		\
+  (FIELD_OFFSET (LK_FIELD (s_name, f_name)))
+
+/* Returns the container of field FNAME of struct SNAME located at address
+   ADDR.  */
+#define LK_CONTAINER_OF(addr, sname, fname)		\
+  ((addr) - LK_OFFSET (sname, fname))
+
+/* Divides nominator N by denominator D and rounds up the result.  */
+#define LK_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
+
+/* Additional access macros to fields in the style of gdbtypes.h */
+/* Returns the size of field FIELD (in bytes). If FIELD is an array returns
+   the size of the whole array.  */
+#define FIELD_SIZE(field)			\
+  TYPE_LENGTH (check_typedef (FIELD_TYPE (*field)))
+
+/* Returns the size of the target type of field FIELD (in bytes).  If FIELD is
+   an array returns the size of its elements.  */
+#define FIELD_TARGET_SIZE(field)		\
+  TYPE_LENGTH (check_typedef (TYPE_TARGET_TYPE (FIELD_TYPE (*field))))
+
+/* Returns the offset of field FIELD (in bytes).  */
+#define FIELD_OFFSET(field)			\
+  (FIELD_BITPOS (*field) / TARGET_CHAR_BIT)
+
+/* Provides the per_cpu_offset of cpu CPU.  If the architecture
+   provides a get_percpu_offset hook, the call is passed to it.  Otherwise
+   returns the __per_cpu_offset[CPU] element.  */
+extern CORE_ADDR lk_get_percpu_offset (unsigned int cpu);
+
+/* Tests if a given task TASK is running. Returns either the cpu-id
+   if running or LK_CPU_INVAL if not.  */
+extern unsigned int lk_task_running (CORE_ADDR task);
+#endif /* __LK_LOW_H__ */
-- 
2.11.2


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]