This is the mail archive of the
libc-alpha@sources.redhat.com
mailing list for the glibc project.
[Patch] Memory lock executable at load time
- From: Corey Minyard <minyard at acm dot org>
- To: libc-alpha at sources dot redhat dot com
- Date: Sat, 04 May 2002 11:07:57 -0500
- Subject: [Patch] Memory lock executable at load time
This time with the patch actually attached.
The following patch allows you to memory-lock executables when they are
loaded, either with mlock() or by reading the file in instead of mmapping
it. To activate this, set the environment variable "LD_LOCK_MEMORY" to
either "READ" or "MLOCK". It also adds parameters to ld.so to do this.
To get the full effect of this, you also need to have no swap to avoid
stack segments and data segments getting paged.
-Corey
--- ./elf/dl-open.c.exec_lock Thu May 2 09:54:30 2002
+++ ./elf/dl-open.c Thu May 2 09:54:42 2002
@@ -228,7 +228,9 @@
/* Load the named object. */
args->map = new = _dl_map_object (NULL, file, 0, lt_loaded, 0,
- mode);
+ mode,
+ mode & (RTLD_LOCK_LIB_PAGES
+ | RTLD_LOCK_MASK));
/* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
set and the object is not already loaded. */
@@ -258,7 +260,9 @@
}
/* Load that object's dependencies. */
- _dl_map_object_deps (new, NULL, 0, 0);
+ _dl_map_object_deps (new, NULL, 0, 0,
+ mode & (RTLD_LOCK_DEPENDENT_LIB_PAGES
+ | RTLD_LOCK_MASK));
/* So far, so good. Now check the versions. */
for (i = 0; i < new->l_searchlist.r_nlist; ++i)
--- ./elf/dl-deps.c.exec_lock Thu May 2 09:54:30 2002
+++ ./elf/dl-deps.c Thu May 2 09:54:42 2002
@@ -52,6 +52,7 @@
/* The arguments to openaux. */
struct link_map *map;
int trace_mode;
+ int locked_load_mode;
const char *strtab;
const char *name;
@@ -67,7 +68,7 @@
args->aux = _dl_map_object (args->map, args->name, 0,
(args->map->l_type == lt_executable
? lt_library : args->map->l_type),
- args->trace_mode, 0);
+ args->trace_mode, 0, args->locked_load_mode);
}
@@ -136,7 +137,7 @@
internal_function
_dl_map_object_deps (struct link_map *map,
struct link_map **preloads, unsigned int npreloads,
- int trace_mode)
+ int trace_mode, int locked_load_mode)
{
struct list known[1 + npreloads + 1];
struct list *runp, *utail, *dtail;
@@ -228,6 +229,7 @@
args.strtab = strtab;
args.map = l;
args.trace_mode = trace_mode;
+ args.locked_load_mode = locked_load_mode;
orig = runp;
for (d = l->l_ld; d->d_tag != DT_NULL; ++d)
--- ./elf/dl-load.c.exec_lock Thu May 2 09:54:30 2002
+++ ./elf/dl-load.c Thu May 2 09:54:42 2002
@@ -754,21 +754,78 @@
struct link_map *
_dl_map_object_from_fd (const char *name, int fd, struct filebuf *fbp,
char *realname, struct link_map *loader, int l_type,
- int mode)
+ int mode, int locked_load_mode)
{
struct link_map *l = NULL;
+ /* Read the whole thing in, restarting due to interrupts if necessary. */
+ static size_t
+ safe_read (int fd, caddr_t mapat, size_t len)
+ {
+ size_t rv;
+ size_t total = 0;
+
+ while (total < len)
+ {
+ rv = __read(fd, mapat + total, len - total);
+ if (rv == -1)
+ {
+ if (errno == EINTR) continue;
+ return -1;
+ }
+ else if (rv == 0)
+ {
+ return total;
+ }
+ total += rv;
+ }
+
+ return total;
+ }
+
auto inline caddr_t map_segment (ElfW(Addr) mapstart, size_t len,
int prot, int fixed, off_t offset);
inline caddr_t map_segment (ElfW(Addr) mapstart, size_t len,
int prot, int fixed, off_t offset)
{
- caddr_t mapat = __mmap ((caddr_t) mapstart, len, prot,
- fixed|MAP_COPY|MAP_FILE,
- fd, offset);
- if (mapat == MAP_FAILED)
- LOSE (errno, N_("failed to map segment from shared object"));
+ caddr_t mapat;
+
+ if (! (locked_load_mode & (RTLD_LOCK_DEPENDENT_LIB_PAGES
+ | RTLD_LOCK_LIB_PAGES)))
+ {
+ mapat = __mmap ((caddr_t) mapstart, len, prot,
+ fixed|MAP_COPY|MAP_FILE,
+ fd, offset);
+ if (mapat == MAP_FAILED)
+ LOSE (errno, "failed to map segment from shared object");
+ }
+ else if (locked_load_mode & RTLD_LOCK_MLOCK)
+ {
+ mapat = __mmap ((caddr_t) mapstart, len, prot,
+ fixed|MAP_COPY|MAP_FILE,
+ fd, offset);
+ if (mapat == MAP_FAILED)
+ LOSE (errno, "failed to map segment from shared object");
+ if (mlock((caddr_t) mapat, len) != 0)
+ {
+ LOSE (errno, "failed to memory lock segment from shared object");
+ __munmap((caddr_t) mapat, len);
+ }
+ }
+ else
+ {
+ mapat = __mmap ((caddr_t) mapstart, len, prot | PROT_WRITE,
+ fixed|MAP_ANON|MAP_PRIVATE, ANONFD, 0);
+ if (mapat == MAP_FAILED)
+ LOSE (errno, "failed to map anonymous segment");
+ if (__lseek(fd, offset, SEEK_SET) != offset)
+ LOSE (errno, "Failed to seek file for mapping");
+ if (safe_read(fd, mapat, len) == -1)
+ LOSE (errno, "Failed to read file into memory");
+ if (__mprotect(mapat, len, prot) == -1)
+ LOSE (errno, "Failed to set protection for memory");
+ }
return mapat;
}
@@ -1142,8 +1199,19 @@
_dl_initfirst = l;
/* Finally the file information. */
- l->l_dev = st.st_dev;
- l->l_ino = st.st_ino;
+ if (locked_load_mode)
+ {
+ /* If the file is to be closed, it can be deleted and the inode
+ reused. This mechanism shouldn't be used for standard libraries
+ anyway, only manual loading, so don't worry about it. */
+ l->l_dev = -1;
+ l->l_ino = -1;
+ }
+ else
+ {
+ l->l_dev = st.st_dev;
+ l->l_ino = st.st_ino;
+ }
return l;
}
@@ -1503,7 +1571,7 @@
struct link_map *
internal_function
_dl_map_object (struct link_map *loader, const char *name, int preloaded,
- int type, int trace_mode, int mode)
+ int type, int trace_mode, int mode, int locked_load_mode)
{
int fd;
char *realname;
@@ -1739,5 +1807,6 @@
_dl_signal_error (errno, name, N_("cannot open shared object file"));
}
- return _dl_map_object_from_fd (name, fd, &fb, realname, loader, type, mode);
+ return _dl_map_object_from_fd (name, fd, &fb, realname, loader, type, mode,
+ locked_load_mode);
}
--- ./elf/rtld.c.exec_lock Thu May 2 09:54:30 2002
+++ ./elf/rtld.c Thu May 2 11:49:30 2002
@@ -66,6 +66,8 @@
const char *_dl_profile;
const char *_dl_profile_output;
struct link_map *_dl_profile_map;
+/* What lock mode are executables loaded with by default. */
+int _dl_locked_load_mode;
int _dl_lazy = 1;
/* XXX I know about at least one case where we depend on the old weak
behavior (it has to do with librt). Until we get DSO groups implemented
@@ -297,7 +299,8 @@
map_doit (void *a)
{
struct map_args *args = (struct map_args *) a;
- args->main_map = _dl_map_object (NULL, args->str, 0, lt_library, 0, 0);
+ args->main_map = _dl_map_object (NULL, args->str, 0, lt_library, 0, 0,
+ _dl_locked_load_mode);
}
static void
@@ -356,6 +359,66 @@
return 0;
}
+#ifdef MAP_ANON
+/* The fd is not examined when using MAP_ANON. */
+# define ANONFD -1
+#else
+extern int _dl_zerofd = -1;
+# define ANONFD _dl_zerofd
+#endif
+
+static void
+lock_exec_pages(void *loaded, caddr_t size, int flags)
+{
+ ElfW(Addr) start = (ElfW(Addr)) loaded;
+ void *mapat, *newmap;
+
+ if ((flags & RTLD_LOCK_MASK) == RTLD_LOCK_MLOCK)
+ {
+ /* mlock mode, just mlock it. */
+ if (mlock((caddr_t) start, size) != 0)
+ {
+ _dl_fatal_printf("Could not lock executable into memory\n");
+ _exit(EXIT_FAILURE);
+ }
+ }
+ else
+ {
+ /* Read mode is trickier, we have to do some funny mapping
+ first. We map some temporary memory to copy the executable
+ to, copy it, remap the old region, then copy it back. */
+ mapat = __mmap(0, size,
+ PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE,
+ ANONFD, 0);
+ if (mapat == MAP_FAILED)
+ {
+ _dl_fatal_printf("Could allocate temp memory for the executable\n");
+ _exit(EXIT_FAILURE);
+ }
+
+ memcpy(mapat, (void *) start, size);
+
+ if (munmap((void *) start, size) != 0)
+ {
+ _dl_fatal_printf("Could unmap the old executable\n");
+ _exit(EXIT_FAILURE);
+ }
+
+ newmap = __mmap((caddr_t) start, size,
+ PROT_EXEC | PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_ANON | MAP_PRIVATE,
+ ANONFD, 0);
+ if (newmap == MAP_FAILED)
+ {
+ _dl_fatal_printf("Could allocate memory for the executable\n");
+ _exit(EXIT_FAILURE);
+ }
+
+ memcpy((void *) start, mapat, size);
+ munmap(mapat, size);
+ }
+}
+
static const char *library_path; /* The library search path. */
static const char *preloadlist; /* The list preloaded objects. */
static int version_info; /* Nonzero if information about
@@ -381,6 +444,8 @@
hp_timing_t diff;
#endif
+ _dl_locked_load_mode = 0;
+
/* Process the environment variable which control the behaviour. */
process_envvars (&mode);
@@ -427,6 +492,22 @@
--_dl_argc;
++_dl_argv;
}
+ else if (! strcmp (_dl_argv[1], "--mlock"))
+ {
+ _dl_locked_load_mode = (RTLD_LOCK_LIB_PAGES | RTLD_LOCK_MLOCK);
+
+ ++_dl_skip_args;
+ --_dl_argc;
+ ++_dl_argv;
+ }
+ else if (! strcmp (_dl_argv[1], "--readload"))
+ {
+ _dl_locked_load_mode = (RTLD_LOCK_LIB_PAGES | RTLD_LOCK_READ);
+
+ ++_dl_skip_args;
+ --_dl_argc;
+ ++_dl_argv;
+ }
else if (! strcmp (_dl_argv[1], "--library-path") && _dl_argc > 2)
{
library_path = _dl_argv[2];
@@ -466,6 +547,8 @@
--list list all dependencies and how they are resolved\n\
--verify verify that given object really is a dynamically linked\n\
object we can handle\n\
+ --mlock Lock the loaded executable pages into memory with mlock\n\
+ --readload Load all executable pages without mmapping the file\n\
--library-path PATH use given PATH instead of content of the environment\n\
variable LD_LIBRARY_PATH\n\
--inhibit-rpath LIST ignore RUNPATH and RPATH information in object names\n\
@@ -497,7 +580,8 @@
else
{
HP_TIMING_NOW (start);
- _dl_map_object (NULL, _dl_argv[0], 0, lt_library, 0, 0);
+ _dl_map_object (NULL, _dl_argv[0], 0, lt_library, 0, 0,
+ _dl_locked_load_mode);
HP_TIMING_NOW (stop);
HP_TIMING_DIFF (load_time, start, stop);
@@ -596,7 +680,12 @@
ElfW(Addr) mapstart;
mapstart = _dl_loaded->l_addr + (ph->p_vaddr & ~(ph->p_align - 1));
if (_dl_loaded->l_map_start > mapstart)
- _dl_loaded->l_map_start = mapstart;
+ {
+ _dl_loaded->l_map_start = mapstart;
+ _dl_loaded->l_map_end = mapstart + ph->p_memsz;
+ if (_dl_locked_load_mode)
+ lock_exec_pages(mapstart, ph->p_memsz, _dl_locked_load_mode);
+ }
}
break;
}
@@ -677,7 +766,8 @@
|| strchr (p, '/') == NULL))
{
struct link_map *new_map = _dl_map_object (_dl_loaded, p, 1,
- lt_library, 0, 0);
+ lt_library, 0, 0,
+ _dl_locked_load_mode);
if (++new_map->l_opencount == 1)
/* It is no duplicate. */
++npreloads;
@@ -745,7 +835,8 @@
if (p[0] != '\0')
{
struct link_map *new_map = _dl_map_object (_dl_loaded, p, 1,
- lt_library, 0, 0);
+ lt_library, 0, 0,
+ _dl_locked_load_mode);
if (++new_map->l_opencount == 1)
/* It is no duplicate. */
++npreloads;
@@ -756,7 +847,8 @@
{
char *p = strndupa (problem, file_size - (problem - file));
struct link_map *new_map = _dl_map_object (_dl_loaded, p, 1,
- lt_library, 0, 0);
+ lt_library, 0, 0,
+ _dl_locked_load_mode);
if (++new_map->l_opencount == 1)
/* It is no duplicate. */
++npreloads;
@@ -789,7 +881,8 @@
specified some libraries to load, these are inserted before the actual
dependencies in the executable's searchlist for symbol resolution. */
HP_TIMING_NOW (start);
- _dl_map_object_deps (_dl_loaded, preloads, npreloads, mode == trace);
+ _dl_map_object_deps (_dl_loaded, preloads, npreloads, mode == trace,
+ _dl_locked_load_mode);
HP_TIMING_NOW (stop);
HP_TIMING_DIFF (diff, start, stop);
HP_TIMING_ACCUM_NT (load_time, diff);
@@ -995,6 +1088,13 @@
_exit (0);
}
+ if (! _dl_loaded->l_info[DT_NEEDED])
+ {
+ /* The file is statically linked, we need to decrement the load
+ count to make it consistent, and we can't relocate it. */
+ _dl_nloaded--;
+ }
+ else
{
/* Now we have all the objects loaded. Relocate them all except for
the dynamic linker itself. We do this in reverse order so that copy
@@ -1357,6 +1457,12 @@
if (!__libc_enable_secure
&& memcmp (&envline[3], "ORIGIN_PATH", 11) == 0)
_dl_origin_path = &envline[15];
+
+ if (memcmp (&envline[3], "LOCK_MEMORY", 11) == 0)
+ if (strcmp(&envline[15], "MLOCK") == 0)
+ _dl_locked_load_mode = (RTLD_LOCK_LIB_PAGES | RTLD_LOCK_MLOCK);
+ else if (strcmp(&envline[15], "READ") == 0)
+ _dl_locked_load_mode = (RTLD_LOCK_LIB_PAGES | RTLD_LOCK_READ);
break;
case 12:
--- ./sysdeps/unix/sysv/linux/syscalls.list.exec_lock Thu May 2 09:54:30 2002
+++ ./sysdeps/unix/sysv/linux/syscalls.list Thu May 2 09:54:42 2002
@@ -23,7 +23,7 @@
lchown - lchown i:sii __lchown lchown
madvise - madvise i:pii posix_madvise madvise
mincore - mincore i:anV mincore
-mlock - mlock i:bn mlock
+mlock - mlock i:bn __mlock mlock
mlockall - mlockall i:i mlockall
mmap - mmap b:aniiii __mmap mmap
mount EXTRA mount i:sssip __mount mount
--- ./sysdeps/generic/bits/dlfcn.h.exec_lock Thu May 2 09:54:30 2002
+++ ./sysdeps/generic/bits/dlfcn.h Thu May 2 09:54:42 2002
@@ -32,6 +32,30 @@
visible as if the object were linked directly into the program. */
#define RTLD_GLOBAL 0x00100
+/* Lock the pages of the loaded library into memory so they won't be
+ demand paged into memory. If you choose this, you need to also
+ choose one of RTLD_LOCK_MLOCK or RTLD_LOCK_READ below. */
+#define RTLD_LOCK_LIB_PAGES 0x200
+
+/* If the following bit is set in the MODE argument to `dlopen',
+ the dependent files will also have their pages locked into memory. */
+#define RTLD_LOCK_DEPENDENT_LIB_PAGES 0x400
+
+/* If the following is specified in the MODE argument to `dlopen',
+   the file will be read into memory but not mapped directly from
+   the file. This leaves the file unused, thus it can be deleted,
+   changed, etc. without any side effects. However, the loaded
+   pages cannot be shared with other processes. */
+#define RTLD_LOCK_READ 0x000
+
+/* If the following is specified in the MODE argument to `dlopen',
+ the file's contents will be mlock()-ed into memory so they will
+ not be demand paged or paged out. You must be root to do this. */
+#define RTLD_LOCK_MLOCK 0x800
+
+/* Mask for the above two options. */
+#define RTLD_LOCK_MASK 0x800
+
/* Unix98 demands the following flag which is the inverse to RTLD_GLOBAL.
The implementation does this by default and so we can define the
value to zero. */
--- ./sysdeps/generic/ldsodefs.h.exec_lock Thu May 2 09:54:30 2002
+++ ./sysdeps/generic/ldsodefs.h Thu May 2 09:54:42 2002
@@ -301,7 +301,8 @@
value to allow additional security checks. */
extern struct link_map *_dl_map_object (struct link_map *loader,
const char *name, int preloaded,
- int type, int trace_mode, int mode)
+ int type, int trace_mode, int mode,
+ int locked_load_mode)
internal_function;
/* Call _dl_map_object on the dependencies of MAP, and set up
@@ -310,7 +311,8 @@
but before its dependencies. */
extern void _dl_map_object_deps (struct link_map *map,
struct link_map **preloads,
- unsigned int npreloads, int trace_mode)
+ unsigned int npreloads, int trace_mode,
+ int locked_load_mode)
internal_function;
/* Cache the locations of MAP's hash table. */