/*
 * Tiny C Memory and bounds checker
 *
 * Copyright (c) 2002 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>

#if !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__) \
    && !defined(__DragonFly__) && !defined(__OpenBSD__)
#include <malloc.h>
#endif

//#define BOUND_DEBUG

/* define this so that the bound array is static (faster, but uses
   memory even if bound checking is not used) */
//#define BOUND_STATIC

/* use malloc hooks. Currently the code cannot be reliable without them */
#define CONFIG_TCC_MALLOC_HOOKS
#define HAVE_MEMALIGN

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__dietlibc__) \
    || defined(__UCLIBC__) || defined(__OpenBSD__) || defined(_WIN32)
#warning Bound checking does not support malloc (etc.) in this environment.
#undef CONFIG_TCC_MALLOC_HOOKS
#undef HAVE_MEMALIGN
#endif

#define BOUND_T1_BITS 13
#define BOUND_T2_BITS 11
#define BOUND_T3_BITS (32 - BOUND_T1_BITS - BOUND_T2_BITS)

#define BOUND_T1_SIZE (1 << BOUND_T1_BITS)
#define BOUND_T2_SIZE (1 << BOUND_T2_BITS)
#define BOUND_T3_SIZE (1 << BOUND_T3_BITS)
#define BOUND_E_BITS  4

#define BOUND_T23_BITS (BOUND_T2_BITS + BOUND_T3_BITS)
#define BOUND_T23_SIZE (1 << BOUND_T23_BITS)

/* this pointer is returned when a bound check fails */
#define INVALID_POINTER ((void *)(-2))
/* size of an empty region */
#define EMPTY_SIZE   0xffffffff
/* size of an invalid region */
#define INVALID_SIZE 0

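/* Added note on the lookup scheme (assuming the 32-bit addresses this
   file is written for): an address is split into three fields,
       t1 index : addr >> (BOUND_T2_BITS + BOUND_T3_BITS)        (top 13 bits)
       t2 index : (addr >> BOUND_T3_BITS) & (BOUND_T2_SIZE - 1)  (next 11 bits)
       offset   : addr & (BOUND_T3_SIZE - 1)                     (low 8 bits)
   so each BoundEntry in a second-level page covers one BOUND_T3_SIZE
   (256 byte) slot.  The lookup code below folds the t2 index and the
   entry size together by shifting with (BOUND_T3_BITS - BOUND_E_BITS),
   which yields the byte offset of the entry inside the page directly. */
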
typedef struct BoundEntry {
    unsigned long start;
    unsigned long size;
    struct BoundEntry *next;
    unsigned long is_invalid; /* true if pointers outside region are invalid */
} BoundEntry;

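/* Added note: with four unsigned-long-sized members, sizeof(BoundEntry)
   is 16 bytes on the assumed 32-bit targets, i.e. exactly
   1 << BOUND_E_BITS; the lookup code relies on this so that a T2 index
   can be turned into a byte offset with a single shift and mask. */
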
/* external interface */
void __bound_init(void);
void __bound_new_region(void *p, unsigned long size);
int __bound_delete_region(void *p);

#define FASTCALL __attribute__((regparm(3)))

void *__bound_malloc(size_t size, const void *caller);
void *__bound_memalign(size_t size, size_t align, const void *caller);
void __bound_free(void *ptr, const void *caller);
void *__bound_realloc(void *ptr, size_t size, const void *caller);
static void *libc_malloc(size_t size);
static void libc_free(void *ptr);
static void install_malloc_hooks(void);
static void restore_malloc_hooks(void);

#ifdef CONFIG_TCC_MALLOC_HOOKS
static void *saved_malloc_hook;
static void *saved_free_hook;
static void *saved_realloc_hook;
static void *saved_memalign_hook;
#endif

/* linker definitions */
extern char _end;

/* TCC definitions */
extern char __bounds_start; /* start of static bounds table */
/* error message, just for TCC */
const char *__bound_error_msg;

/* runtime error output */
extern void rt_error(unsigned long pc, const char *fmt, ...);

#ifdef BOUND_STATIC
static BoundEntry *__bound_t1[BOUND_T1_SIZE]; /* page table */
#else
static BoundEntry **__bound_t1; /* page table */
#endif
static BoundEntry *__bound_empty_t2;   /* empty page, for unused pages */
static BoundEntry *__bound_invalid_t2; /* invalid page, for invalid pointers */

static BoundEntry *__bound_find_region(BoundEntry *e1, void *p)
{
    unsigned long addr, tmp;
    BoundEntry *e;

    e = e1;
    while (e != NULL) {
        addr = (unsigned long)p;
        addr -= e->start;
        if (addr <= e->size) {
            /* put region at the head */
            tmp = e1->start;
            e1->start = e->start;
            e->start = tmp;
            tmp = e1->size;
            e1->size = e->size;
            e->size = tmp;
            return e1;
        }
        e = e->next;
    }
    /* no entry found: return empty entry or invalid entry */
    if (e1->is_invalid)
        return __bound_invalid_t2;
    else
        return __bound_empty_t2;
}

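/* Added note: only start/size are swapped with the head entry above, so
   the head of each T2 slot behaves as a one-entry cache; after a hit the
   inline fast path in __bound_ptr_add and the __bound_ptr_indir helpers
   succeeds without walking the list again. */
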
/* print a bound error message */
static void bound_error(const char *fmt, ...)
{
    __bound_error_msg = fmt;
    *(int *)0 = 0; /* force a runtime error */
}

static void bound_alloc_error(void)
{
    bound_error("not enough memory for bound checking code");
}

/* return '(p + offset)' for pointer arithmetic (a pointer can reach
   the end of a region in this case) */
void * FASTCALL __bound_ptr_add(void *p, int offset)
{
    unsigned long addr = (unsigned long)p;
    BoundEntry *e;
#if defined(BOUND_DEBUG)
    printf("add: %p %d\n", p, offset);
#endif

    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
    e = (BoundEntry *)((char *)e +
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
    addr -= e->start;
    if (addr > e->size) {
        e = __bound_find_region(e, p);
        addr = (unsigned long)p - e->start;
    }
    addr += offset;
    if (addr > e->size)
        return INVALID_POINTER; /* return an invalid pointer */
    return p + offset;
}

/* return '(p + offset)' for pointer indirection (the resulting pointer
   must be strictly inside the region) */
#define BOUND_PTR_INDIR(dsize)                                          \
void * FASTCALL __bound_ptr_indir ## dsize (void *p, int offset)       \
{                                                                       \
    unsigned long addr = (unsigned long)p;                              \
    BoundEntry *e;                                                      \
                                                                        \
    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];            \
    e = (BoundEntry *)((char *)e +                                      \
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &      \
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));        \
    addr -= e->start;                                                   \
    if (addr > e->size) {                                               \
        e = __bound_find_region(e, p);                                  \
        addr = (unsigned long)p - e->start;                             \
    }                                                                   \
    addr += offset + dsize;                                             \
    if (addr > e->size)                                                 \
        return INVALID_POINTER; /* return an invalid pointer */         \
    return p + offset;                                                  \
}

BOUND_PTR_INDIR(1)
BOUND_PTR_INDIR(2)
BOUND_PTR_INDIR(4)
BOUND_PTR_INDIR(8)
BOUND_PTR_INDIR(12)
BOUND_PTR_INDIR(16)

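/* Illustrative sketch (not literal generated code): with bound checking
   enabled, pointer arithmetic and dereferences are expected to be routed
   through the helpers above, roughly:

       p = a + i;   ->   p = __bound_ptr_add(a, i * sizeof(*a));
       x = *p;      ->   x = *(int *)__bound_ptr_indir4(p, 0);

   The dsize variants (1 to 16 bytes) match the size of the access, so a
   read or write that would end past the region also yields
   INVALID_POINTER and faults instead of silently corrupting memory. */
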
/* return the frame pointer of the caller */
#define GET_CALLER_FP(fp)\
{\
    fp = (unsigned long)__builtin_frame_address(1);\
}

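/* Added note (summarising why __builtin_frame_address is used): an older
   version read %ebp with inline assembly, which returned the wrong frame
   when the compiler omitted the frame pointer in the calling function;
   __builtin_frame_address(1) makes the compiler itself produce the
   caller's frame address, so the functions below register the correct
   local regions however they are compiled. */
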
/* called when entering a function to add all the local regions */
void FASTCALL __bound_local_new(void *p1)
{
    unsigned long addr, size, fp, *p = p1;
    GET_CALLER_FP(fp);
    for(;;) {
        addr = p[0];
        if (addr == 0)
            break;
        addr += fp;
        size = p[1];
        p += 2;
        __bound_new_region((void *)addr, size);
    }
}

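/* Added note: 'p1' points to a table emitted by the compiler for the
   current function: a sequence of (frame offset, size) pairs terminated
   by a zero offset, e.g. { -16, 12, -32, 4, 0 } (illustrative values
   only).  Each offset is added to the caller's frame pointer to recover
   the address of the corresponding local array. */
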
/* called when leaving a function to delete all the local regions */
void FASTCALL __bound_local_delete(void *p1)
{
    unsigned long addr, fp, *p = p1;
    GET_CALLER_FP(fp);
    for(;;) {
        addr = p[0];
        if (addr == 0)
            break;
        addr += fp;
        p += 2;
        __bound_delete_region((void *)addr);
    }
}

static BoundEntry *__bound_new_page(void)
{
    BoundEntry *page;
    int i;

    page = libc_malloc(sizeof(BoundEntry) * BOUND_T2_SIZE);
    if (!page)
        bound_alloc_error();
    for(i=0;i<BOUND_T2_SIZE;i++) {
        /* put empty entries */
        page[i].start = 0;
        page[i].size = EMPTY_SIZE;
        page[i].next = NULL;
        page[i].is_invalid = 0;
    }
    return page;
}

/* currently we use malloc(). Should allocate from __bound_new_page() instead */
static BoundEntry *bound_new_entry(void)
{
    BoundEntry *e;
    e = libc_malloc(sizeof(BoundEntry));
    return e;
}

static void bound_free_entry(BoundEntry *e)
{
    libc_free(e);
}

static inline BoundEntry *get_page(int index)
{
    BoundEntry *page;
    page = __bound_t1[index];
    if (page == __bound_empty_t2 || page == __bound_invalid_t2) {
        /* create a new page if necessary */
        page = __bound_new_page();
        __bound_t1[index] = page;
    }
    return page;
}

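/* Added note: every T1 slot initially points at the shared
   __bound_empty_t2 (or __bound_invalid_t2) page; get_page() swaps in a
   freshly allocated page of empty entries the first time a region is
   recorded in that slot, so second-level pages are only allocated for
   address ranges that actually contain tracked regions. */
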
/* mark a region as being invalid (can only be used during init) */
static void mark_invalid(unsigned long addr, unsigned long size)
{
    unsigned long start, end;
    BoundEntry *page;
    int t1_start, t1_end, i, j, t2_start, t2_end;

    start = addr;
    end = addr + size;

    t2_start = (start + BOUND_T3_SIZE - 1) >> BOUND_T3_BITS;
    if (end != 0)
        t2_end = end >> BOUND_T3_BITS;
    else
        t2_end = 1 << (BOUND_T1_BITS + BOUND_T2_BITS);

#if 0
    printf("mark_invalid: start = %x %x\n", t2_start, t2_end);
#endif

    /* first we handle full pages */
    t1_start = (t2_start + BOUND_T2_SIZE - 1) >> BOUND_T2_BITS;
    t1_end = t2_end >> BOUND_T2_BITS;

    i = t2_start & (BOUND_T2_SIZE - 1);
    j = t2_end & (BOUND_T2_SIZE - 1);

    if (t1_start == t1_end) {
        page = get_page(t2_start >> BOUND_T2_BITS);
        for(; i < j; i++) {
            page[i].size = INVALID_SIZE;
            page[i].is_invalid = 1;
        }
    } else {
        if (i > 0) {
            page = get_page(t2_start >> BOUND_T2_BITS);
            for(; i < BOUND_T2_SIZE; i++) {
                page[i].size = INVALID_SIZE;
                page[i].is_invalid = 1;
            }
        }
        for(i = t1_start; i < t1_end; i++) {
            __bound_t1[i] = __bound_invalid_t2;
        }
        if (j != 0) {
            page = get_page(t1_end);
            for(i = 0; i < j; i++) {
                page[i].size = INVALID_SIZE;
                page[i].is_invalid = 1;
            }
        }
    }
}

void __bound_init(void)
{
    int i;
    BoundEntry *page;
    unsigned long start, size;
    int *p;

    /* save malloc hooks and install bound check hooks */
    install_malloc_hooks();

#ifndef BOUND_STATIC
    __bound_t1 = libc_malloc(BOUND_T1_SIZE * sizeof(BoundEntry *));
    if (!__bound_t1)
        bound_alloc_error();
#endif
    __bound_empty_t2 = __bound_new_page();
    for(i=0;i<BOUND_T1_SIZE;i++) {
        __bound_t1[i] = __bound_empty_t2;
    }

    page = __bound_new_page();
    for(i=0;i<BOUND_T2_SIZE;i++) {
        /* put invalid entries */
        page[i].start = 0;
        page[i].size = INVALID_SIZE;
        page[i].next = NULL;
        page[i].is_invalid = 1;
    }
    __bound_invalid_t2 = page;

    /* invalid pointer zone */
    start = (unsigned long)INVALID_POINTER & ~(BOUND_T23_SIZE - 1);
    size = BOUND_T23_SIZE;
    mark_invalid(start, size);

#if !defined(__TINYC__) && defined(CONFIG_TCC_MALLOC_HOOKS)
    /* malloc zone is also marked invalid. can only use that with
       hooks because all libs should use the same malloc. The solution
       would be to build a new malloc for tcc. */
    start = (unsigned long)&_end;
    size = 128 * 0x100000;
    mark_invalid(start, size);
#endif

    /* add all static bound check values */
    p = (int *)&__bounds_start;
    while (p[0] != 0) {
        __bound_new_region((void *)p[0], p[1]);
        p += 2;
    }
}

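/* Added note: the 128 MB window marked invalid above _end is a
   heuristic covering the conventional brk heap, so that a pointer into
   malloc'ed memory which was never registered through the hooks is
   reported as invalid rather than silently accepted. */
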
void __bound_exit(void)
{
    restore_malloc_hooks();
}

static inline void add_region(BoundEntry *e,
                              unsigned long start, unsigned long size)
{
    BoundEntry *e1;
    if (e->start == 0) {
        /* no region: add it */
        e->start = start;
        e->size = size;
    } else {
        /* already regions in the list: add it at the head */
        e1 = bound_new_entry();
        e1->start = e->start;
        e1->size = e->size;
        e1->next = e->next;
        e->start = start;
        e->size = size;
        e->next = e1;
    }
}

/* create a new region. It should not already exist in the region list */
void __bound_new_region(void *p, unsigned long size)
{
    unsigned long start, end;
    BoundEntry *page, *e, *e2;
    int t1_start, t1_end, i, t2_start, t2_end;

    start = (unsigned long)p;
    end = start + size;
    t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);

    /* start */
    page = get_page(t1_start);
    t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
    t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
#ifdef BOUND_DEBUG
    printf("new %lx %lx %x %x %x %x\n",
           start, end, t1_start, t1_end, t2_start, t2_end);
#endif

    e = (BoundEntry *)((char *)page + t2_start);
    add_region(e, start, size);

    if (t1_end == t1_start) {
        /* same ending page */
        e2 = (BoundEntry *)((char *)page + t2_end);
        if (e2 > e) {
            e++;
            for(;e<e2;e++) {
                e->start = start;
                e->size = size;
            }
            add_region(e, start, size);
        }
    } else {
        /* mark until end of page */
        e2 = page + BOUND_T2_SIZE;
        e++;
        for(;e<e2;e++) {
            e->start = start;
            e->size = size;
        }
        /* mark intermediate pages, if any */
        for(i=t1_start+1;i<t1_end;i++) {
            page = get_page(i);
            e2 = page + BOUND_T2_SIZE;
            for(e=page;e<e2;e++) {
                e->start = start;
                e->size = size;
            }
        }
        /* last page */
        page = get_page(t1_end);
        e2 = (BoundEntry *)((char *)page + t2_end);
        for(e=page;e<e2;e++) {
            e->start = start;
            e->size = size;
        }
        add_region(e, start, size);
    }
}

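/* Added note: only the first and the last BoundEntry of a new region go
   through add_region() and may therefore chain onto entries of other
   regions sharing the same 256-byte slot; the entries fully covered in
   between are simply overwritten with the region's start/size, since no
   other (non-overlapping) region can occupy them. */
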
/* delete a region */
static inline void delete_region(BoundEntry *e,
                                 void *p, unsigned long empty_size)
{
    unsigned long addr;
    BoundEntry *e1;

    addr = (unsigned long)p;
    addr -= e->start;
    if (addr <= e->size) {
        /* region found is first one */
        e1 = e->next;
        if (e1 == NULL) {
            /* no more region: mark it empty */
            e->start = 0;
            e->size = empty_size;
        } else {
            /* copy next region in head */
            e->start = e1->start;
            e->size = e1->size;
            e->next = e1->next;
            bound_free_entry(e1);
        }
    } else {
        /* find the matching region */
        for(;;) {
            e1 = e;
            e = e->next;
            /* region not found: do nothing */
            if (e == NULL)
                break;
            addr = (unsigned long)p - e->start;
            if (addr <= e->size) {
                /* found: remove entry */
                e1->next = e->next;
                bound_free_entry(e);
                break;
            }
        }
    }
}

/* WARNING: 'p' must be the starting point of the region. */
/* return non-zero on error */
int __bound_delete_region(void *p)
{
    unsigned long start, end, addr, size, empty_size;
    BoundEntry *page, *e, *e2;
    int t1_start, t1_end, t2_start, t2_end, i;

    start = (unsigned long)p;
    t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);

    /* find region size */
    page = __bound_t1[t1_start];
    e = (BoundEntry *)((char *)page + t2_start);
    addr = start - e->start;
    if (addr > e->size)
        e = __bound_find_region(e, p);
    /* test if invalid region */
    if (e->size == EMPTY_SIZE || (unsigned long)p != e->start)
        return -1;
    /* compute the size we put in invalid regions */
    if (e->is_invalid)
        empty_size = INVALID_SIZE;
    else
        empty_size = EMPTY_SIZE;
    size = e->size;
    end = start + size;

    /* now we can free each entry */
    t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);

    delete_region(e, p, empty_size);
    if (t1_end == t1_start) {
        /* same ending page */
        e2 = (BoundEntry *)((char *)page + t2_end);
        if (e2 > e) {
            e++;
            for(;e<e2;e++) {
                e->start = 0;
                e->size = empty_size;
            }
            delete_region(e, p, empty_size);
        }
    } else {
        /* mark until end of page */
        e2 = page + BOUND_T2_SIZE;
        e++;
        for(;e<e2;e++) {
            e->start = 0;
            e->size = empty_size;
        }
        /* mark intermediate pages, if any */
        /* XXX: should free them */
        for(i=t1_start+1;i<t1_end;i++) {
            page = get_page(i);
            e2 = page + BOUND_T2_SIZE;
            for(e=page;e<e2;e++) {
                e->start = 0;
                e->size = empty_size;
            }
        }
        /* last page */
        page = get_page(t1_end);
        e2 = (BoundEntry *)((char *)page + t2_end);
        for(e=page;e<e2;e++) {
            e->start = 0;
            e->size = empty_size;
        }
        delete_region(e, p, empty_size);
    }
    return 0;
}

/* return the size of the region starting at p, or EMPTY_SIZE for a
   nonexistent region. */
static unsigned long get_region_size(void *p)
{
    unsigned long addr = (unsigned long)p;
    BoundEntry *e;

    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
    e = (BoundEntry *)((char *)e +
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
    addr -= e->start;
    if (addr > e->size)
        e = __bound_find_region(e, p);
    if (e->start != (unsigned long)p)
        return EMPTY_SIZE;
    return e->size;
}

/* patched memory functions */

/* force compiler to perform stores coded up to this point */
#define barrier()   __asm__ __volatile__ ("": : : "memory")

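/* Added note (from the history of this macro): without the barrier the
   compiler could treat the hook save/restore stores below as dead and
   drop them, leaving __malloc_hook permanently pointing at
   __bound_malloc; any program built with `tcc -b` then crashed at
   startup with infinite recursion inside libc_malloc(). */
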
static void install_malloc_hooks(void)
{
#ifdef CONFIG_TCC_MALLOC_HOOKS
    saved_malloc_hook = __malloc_hook;
    saved_free_hook = __free_hook;
    saved_realloc_hook = __realloc_hook;
    saved_memalign_hook = __memalign_hook;
    __malloc_hook = __bound_malloc;
    __free_hook = __bound_free;
    __realloc_hook = __bound_realloc;
    __memalign_hook = __bound_memalign;

    barrier();
#endif
}

static void restore_malloc_hooks(void)
{
#ifdef CONFIG_TCC_MALLOC_HOOKS
    __malloc_hook = saved_malloc_hook;
    __free_hook = saved_free_hook;
    __realloc_hook = saved_realloc_hook;
    __memalign_hook = saved_memalign_hook;

    barrier();
#endif
}

static void *libc_malloc(size_t size)
{
    void *ptr;
    restore_malloc_hooks();
    ptr = malloc(size);
    install_malloc_hooks();
    return ptr;
}

static void libc_free(void *ptr)
{
    restore_malloc_hooks();
    free(ptr);
    install_malloc_hooks();
}

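/* Added note: the original hooks are restored around the real malloc()
   and free() calls so that the checker's own bookkeeping allocations do
   not re-enter __bound_malloc/__bound_free through the glibc hook
   mechanism. */
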
/* XXX: we should use a malloc which ensures that it is unlikely that
   two malloc'ed data have the same address if 'free' is done in
   between. */
void *__bound_malloc(size_t size, const void *caller)
{
    void *ptr;

    /* we allocate one more byte to ensure the regions will be
       separated by at least one byte. With the glibc malloc, this may
       in fact not be necessary */
    ptr = libc_malloc(size + 1);

    if (!ptr)
        return NULL;
    __bound_new_region(ptr, size);
    return ptr;
}

void *__bound_memalign(size_t size, size_t align, const void *caller)
{
    void *ptr;

    restore_malloc_hooks();

#ifndef HAVE_MEMALIGN
    if (align > 4) {
        /* XXX: handle it ? */
        ptr = NULL;
    } else {
        /* we suppose that malloc aligns to at least four bytes */
        ptr = malloc(size + 1);
    }
#else
    /* we allocate one more byte to ensure the regions will be
       separated by at least one byte. With the glibc malloc, this may
       in fact not be necessary */
    ptr = memalign(align, size + 1);
#endif

    install_malloc_hooks();

    if (!ptr)
        return NULL;
    __bound_new_region(ptr, size);
    return ptr;
}

void __bound_free(void *ptr, const void *caller)
{
    if (ptr == NULL)
        return;
    if (__bound_delete_region(ptr) != 0)
        bound_error("freeing invalid region");

    libc_free(ptr);
}

void *__bound_realloc(void *ptr, size_t size, const void *caller)
{
    void *ptr1;
    int old_size;

    if (size == 0) {
        __bound_free(ptr, caller);
        return NULL;
    } else {
        ptr1 = __bound_malloc(size, caller);
        if (ptr == NULL || ptr1 == NULL)
            return ptr1;
        old_size = get_region_size(ptr);
        if (old_size == EMPTY_SIZE)
            bound_error("realloc'ing invalid pointer");
        memcpy(ptr1, ptr, old_size);
        __bound_free(ptr, caller);
        return ptr1;
    }
}

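/* Added note: realloc is implemented here as malloc + copy + free; the
   old block's size comes from the bounds table via get_region_size(),
   and both blocks pass through the normal __bound_malloc/__bound_free
   paths so the region table stays consistent. */
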
#ifndef CONFIG_TCC_MALLOC_HOOKS
void *__bound_calloc(size_t nmemb, size_t size)
{
    void *ptr;
    size = size * nmemb;
    ptr = __bound_malloc(size, NULL);
    if (!ptr)
        return NULL;
    memset(ptr, 0, size);
    return ptr;
}
#endif

#if 0
static void bound_dump(void)
{
    BoundEntry *page, *e;
    int i, j;

    printf("region dump:\n");
    for(i=0;i<BOUND_T1_SIZE;i++) {
        page = __bound_t1[i];
        for(j=0;j<BOUND_T2_SIZE;j++) {
            e = page + j;
            /* do not print invalid or empty entries */
            if (e->size != EMPTY_SIZE && e->start != 0) {
                printf("%08x:",
                       (i << (BOUND_T2_BITS + BOUND_T3_BITS)) +
                       (j << BOUND_T3_BITS));
                do {
                    printf(" %08lx:%08lx", e->start, e->start + e->size);
                    e = e->next;
                } while (e != NULL);
                printf("\n");
            }
        }
    }
}
#endif

/* some useful checked functions */

/* check that (p ... p + size - 1) lies inside 'p' region, if any */
static void __bound_check(const void *p, size_t size)
{
    if (size == 0)
        return;
    p = __bound_ptr_add((void *)p, size);
    if (p == INVALID_POINTER)
        bound_error("invalid pointer");
}

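/* Added note (an assumption about usage): when bound checking is
   enabled, calls to memcpy, memmove, memset, strlen and strcpy are
   expected to be redirected to the __bound_* wrappers below, so their
   arguments are validated against the region table before the real
   libc routine runs. */
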
void *__bound_memcpy(void *dst, const void *src, size_t size)
{
    __bound_check(dst, size);
    __bound_check(src, size);
    /* check also region overlap */
    if (src >= dst && src < dst + size)
        bound_error("overlapping regions in memcpy()");
    return memcpy(dst, src, size);
}

void *__bound_memmove(void *dst, const void *src, size_t size)
{
    __bound_check(dst, size);
    __bound_check(src, size);
    return memmove(dst, src, size);
}

void *__bound_memset(void *dst, int c, size_t size)
{
    __bound_check(dst, size);
    return memset(dst, c, size);
}

/* XXX: could be optimized */
int __bound_strlen(const char *s)
{
    const char *p;
    int len;

    len = 0;
    for(;;) {
        p = __bound_ptr_indir1((char *)s, len);
        if (p == INVALID_POINTER)
            bound_error("bad pointer in strlen()");
        if (*p == '\0')
            break;
        len++;
    }
    return len;
}

char *__bound_strcpy(char *dst, const char *src)
{
    int len;
    len = __bound_strlen(src);
    return __bound_memcpy(dst, src, len + 1);
}