This is the mail archive of the libc-hacker@sourceware.cygnus.com mailing list for the glibc project.

Note that libc-hacker is a closed list. You may look at the archives of this list, but subscription and posting are not open.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]

malloc patch for overflow detection


OK, here is what I promised I would come up with to make even the
strangest malloc test programs happy.  It isn't perfect, but I can't
measure a significant slowdown from the non-overflow checking version
on Intel.  If the patch cannot be applied to current glibc (it should),
just yell and I will provide a line-number-clean version.

Incidentally, my last malloc patch possibly wasn't applied completely;
the realloc() part was dropped.  Should I send it again?

Regards,
Wolfram.

1999-07-04  Wolfram Gloger  <wmglo@dent.med.uni-muenchen.de>

	    * malloc/malloc.c (request2size): Check for overflow and return
	    NULL whenever it is encountered.

--- ptmalloc.c	1999/07/04 20:34:03	1.1.1.10
+++ ptmalloc.c	1999/07/04 21:08:24
@@ -1256,12 +1256,12 @@
 #define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
 
-/* pad request bytes into a usable size */
+/* pad request bytes into a usable size, return non-zero on overflow */
 
-#define request2size(req) \
- (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
-  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
-   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
+#define request2size(req, nb) \
+ ((nb = (req) + (SIZE_SZ + MALLOC_ALIGN_MASK)),\
+  ((long)nb <= 0 ? 1 : ((nb < (MINSIZE + MALLOC_ALIGN_MASK) ? (nb = MINSIZE) :\
+                         (nb &= ~MALLOC_ALIGN_MASK)), 0)))
 
 /* Check if m has acceptable alignment */
 
@@ -2623,7 +2623,8 @@
   }
 #endif
 
-  nb = request2size(bytes);
+  if(request2size(bytes, nb))
+    return 0;
   arena_get(ar_ptr, nb);
   if(!ar_ptr)
     return 0;
@@ -3126,7 +3127,8 @@
   oldp    = mem2chunk(oldmem);
   oldsize = chunksize(oldp);
 
-  nb = request2size(bytes);
+  if(request2size(bytes, nb))
+    return 0;
 
 #if HAVE_MMAP
   if (chunk_is_mmapped(oldp))
@@ -3387,7 +3389,8 @@
 
   if (alignment <  MINSIZE) alignment = MINSIZE;
 
-  nb = request2size(bytes);
+  if(request2size(bytes, nb))
+    return 0;
   arena_get(ar_ptr, nb + alignment + MINSIZE);
   if(!ar_ptr)
     return 0;
@@ -3558,7 +3561,8 @@
   }
 #endif
 
-  sz = request2size(n * elem_size);
+  if(request2size(n * elem_size, sz))
+    return 0;
   arena_get(ar_ptr, sz);
   if(!ar_ptr)
     return 0;
@@ -4344,8 +4348,10 @@
 #endif
 {
   mchunkptr victim;
-  INTERNAL_SIZE_T nb = request2size(sz + 1);
+  INTERNAL_SIZE_T nb;
 
+  if(request2size(sz+1, nb))
+    return 0;
   (void)mutex_lock(&main_arena.mutex);
   victim = (top_check() >= 0) ? chunk_alloc(&main_arena, nb) : NULL;
   (void)mutex_unlock(&main_arena.mutex);
@@ -4410,7 +4416,10 @@
   }
   oldsize = chunksize(oldp);
 
-  nb = request2size(bytes+1);
+  if(request2size(bytes+1, nb)) {
+    (void)mutex_unlock(&main_arena.mutex);
+    return 0;
+  }
 
 #if HAVE_MMAP
   if (chunk_is_mmapped(oldp)) {
@@ -4466,7 +4475,8 @@
   if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes);
   if (alignment <  MINSIZE) alignment = MINSIZE;
 
-  nb = request2size(bytes+1);
+  if(request2size(bytes+1, nb))
+    return 0;
   (void)mutex_lock(&main_arena.mutex);
   p = (top_check() >= 0) ? chunk_align(&main_arena, nb, alignment) : NULL;
   (void)mutex_unlock(&main_arena.mutex);
@@ -4486,7 +4496,12 @@
 malloc_starter(sz) size_t sz;
 #endif
 {
-  mchunkptr victim = chunk_alloc(&main_arena, request2size(sz));
+  INTERNAL_SIZE_T nb;
+  mchunkptr victim;
+
+  if(request2size(sz, nb))
+    return 0;
+  victim = chunk_alloc(&main_arena, nb);
 
   return victim ? chunk2mem(victim) : 0;
 }
@@ -4522,16 +4537,20 @@
 #endif
 {
   Void_t *vptr = NULL;
+  INTERNAL_SIZE_T nb;
   mchunkptr victim;
 
   tsd_getspecific(arena_key, vptr);
   if(!vptr) {
     if(save_malloc_hook != malloc_check) {
-      victim = chunk_alloc(&main_arena, request2size(sz));
+      if(request2size(sz, nb))
+        return 0;
+      victim = chunk_alloc(&main_arena, nb);
       return victim ? chunk2mem(victim) : 0;
     } else {
-      if(top_check() < 0) return 0;
-      victim = chunk_alloc(&main_arena, request2size(sz+1));
+      if(top_check()<0 || request2size(sz+1, nb))
+        return 0;
+      victim = chunk_alloc(&main_arena, nb);
       return victim ? chunk2mem_check(victim, sz) : 0;
     }
   } else {

-- 
`Surf the sea, not double-u three...'
wmglo@dent.med.uni-muenchen.de

Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]