Mirror of https://github.com/mirror/tinycc.git
Add bound checking to arm, arm64 and riscv64
Checked on:
- i386/x86_64 (linux/windows)
- arm/arm64 (Raspberry Pi)
- riscv64 (simulator)
Not tested for arm softfloat because the Raspberry Pi does not support it.

Modifications:

Makefile:
    add arm-asm.c to arm64_FILES
    add riscv64-asm.c (new file) to riscv64_FILES
lib/Makefile:
    add fetch_and_add_arm.o (new file) to ARM_O
    add fetch_and_add_arm64.o (new file) to ARM64_O
    add fetch_and_add_riscv64.o (new file) to RISCV64_O
    add $(BCHECK_O) to OBJ-arm/OBJ-arm64/OBJ-riscv64
tcc.h:
    Enable CONFIG_TCC_BCHECK for arm32/arm64/riscv64
    Add arm-asm.c, riscv64-asm.c
tcctok.h:
    for arm use memmove4 instead of memcpy4
    for arm use memmove8 instead of memcpy8
tccgen.c:
    put_extern_sym2: for arm check memcpy/memmove/memset/memmove4/memmove8
    only use alloca for i386/x86_64
    for arm use memmove4 instead of memcpy4
    for arm use memmove8 instead of memcpy8
    fix builtin_frame_address/builtin_return_address for arm/riscv64
tccrun.c:
    Add riscv64 support
    fix rt_getcontext/rt_get_caller_pc for arm
tccelf.c:
    tcc_load_dll: Print filename for bad architecture
libtcc.c:
    add arm-asm.c/riscv64-asm.c
tcc-doc.texi:
    Add arm, arm64, riscv64 support for bound checking
lib/bcheck.c:
    add __bound___aeabi_memcpy/__bound___aeabi_memmove,
        __bound___aeabi_memmove4/__bound___aeabi_memmove8 and
        __bound___aeabi_memset for arm
    call fetch_and_add_arm/fetch_and_add_arm64/fetch_and_add_riscv64
    __bound_init: Fix type for start/end/ad
    __bound_malloc/__bound_memalign/__bound_realloc/__bound_calloc: Use size + 1
arm-gen.c:
    add bound checking code like i386/x86_64
    assign_regs: only malloc if nb_args != 0
    gen_opi/gen_opf: Fix reload problems
arm-link.c:
    relocate_plt: Fix address calculation
arm64-gen.c:
    add bound checking code like i386/x86_64
    load/store: remove VT_BOUNDED from sv->r
    arm64_hfa_aux/arm64_hfa: Fix array code
    gfunc_prolog: only malloc if n != 0
arm64-link.c:
    code_reloc/gotplt_entry_type/relocate: add R_AARCH64_LDST64_ABS_LO12_NC
    relocate: Use addXXle instead of writeXXle
riscv64-gen.c:
    add bound checking code like i386/x86_64
    add NB_ASM_REGS/CONFIG_TCC_ASM
riscv64-link.c:
    relocate: Use addXXle instead of writeXXle
i386-gen.c/x86_64-gen.c:
    gen_bounds_epilog: Fix code (unrelated)
tests/Makefile:
    add $(BTESTS) for arm/arm64/riscv64
tests/tests2/Makefile:
    Use 85 only on i386/x86_64 because of asm code
    Use 113 only on i386/x86_64 because of DLL code
    Add 112/114/115/116 for arm/arm64/riscv64
    Fix FILTER (failed on riscv64)
tests/boundtest.c:
    Only use alloca for i386/x86_64
Parent: 9eef33993a
Commit: 0b8ee7364a
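For context only (not part of the commit): with bound checking now enabled on these targets, a program like the one below, compiled natively with tcc's -b option, is expected to abort with a bounds error at the memset instead of silently overrunning the allocation. The file name and the exact diagnostic are illustrative.

/* oob.c - build and run with: tcc -b -run oob.c */
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *p = malloc(8);
    memset(p, 0, 9);   /* writes one byte past the allocation; bcheck should flag this */
    free(p);
    return 0;
}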
Makefile | 4
@@ -176,9 +176,9 @@ arm-fpa-ld_FILES = $(arm_FILES)
 arm-vfp_FILES = $(arm_FILES)
 arm-eabi_FILES = $(arm_FILES)
 arm-eabihf_FILES = $(arm_FILES)
-arm64_FILES = $(CORE_FILES) arm64-gen.c arm64-link.c
+arm64_FILES = $(CORE_FILES) arm64-gen.c arm64-link.c arm-asm.c
 c67_FILES = $(CORE_FILES) c67-gen.c c67-link.c tcccoff.c
-riscv64_FILES = $(CORE_FILES) riscv64-gen.c riscv64-link.c
+riscv64_FILES = $(CORE_FILES) riscv64-gen.c riscv64-link.c riscv64-asm.c
 
 # libtcc sources
 LIBTCC_SRC = $(filter-out tcc.c tcctools.c,$(filter %.c,$($T_FILES)))
arm-gen.c | 218
@ -158,6 +158,12 @@ ST_DATA const int reg_classes[NB_REGS] = {
|
|||||||
static int func_sub_sp_offset, last_itod_magic;
|
static int func_sub_sp_offset, last_itod_magic;
|
||||||
static int leaffunc;
|
static int leaffunc;
|
||||||
|
|
||||||
|
#if defined(CONFIG_TCC_BCHECK)
|
||||||
|
static addr_t func_bound_offset;
|
||||||
|
static unsigned long func_bound_ind;
|
||||||
|
static int func_bound_add_epilog;
|
||||||
|
#endif
|
||||||
|
|
||||||
#if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
|
#if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
|
||||||
static CType float_type, double_type, func_float_type, func_double_type;
|
static CType float_type, double_type, func_float_type, func_double_type;
|
||||||
ST_FUNC void arm_init(struct TCCState *s)
|
ST_FUNC void arm_init(struct TCCState *s)
|
||||||
@ -193,11 +199,17 @@ ST_FUNC void arm_init(struct TCCState *s)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#define CHECK_R(r) ((r) >= TREG_R0 && (r) <= TREG_LR)
|
||||||
|
|
||||||
static int two2mask(int a,int b) {
|
static int two2mask(int a,int b) {
|
||||||
|
if (!CHECK_R(a) || !CHECK_R(b))
|
||||||
|
tcc_error("compiler error! registers %i,%i is not valid",a,b);
|
||||||
return (reg_classes[a]|reg_classes[b])&~(RC_INT|RC_FLOAT);
|
return (reg_classes[a]|reg_classes[b])&~(RC_INT|RC_FLOAT);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int regmask(int r) {
|
static int regmask(int r) {
|
||||||
|
if (!CHECK_R(r))
|
||||||
|
tcc_error("compiler error! register %i is not valid",r);
|
||||||
return reg_classes[r]&~(RC_INT|RC_FLOAT);
|
return reg_classes[r]&~(RC_INT|RC_FLOAT);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -751,6 +763,14 @@ static void gcall_or_jmp(int is_jmp)
|
|||||||
greloc(cur_text_section, vtop->sym, ind, R_ARM_ABS32);
|
greloc(cur_text_section, vtop->sym, ind, R_ARM_ABS32);
|
||||||
o(vtop->c.i);
|
o(vtop->c.i);
|
||||||
}
|
}
|
||||||
|
#ifdef CONFIG_TCC_BCHECK
|
||||||
|
if (tcc_state->do_bounds_check &&
|
||||||
|
(vtop->sym->v == TOK_setjmp ||
|
||||||
|
vtop->sym->v == TOK__setjmp ||
|
||||||
|
vtop->sym->v == TOK_sigsetjmp ||
|
||||||
|
vtop->sym->v == TOK___sigsetjmp))
|
||||||
|
func_bound_add_epilog = 1;
|
||||||
|
#endif
|
||||||
}else{
|
}else{
|
||||||
if(!is_jmp)
|
if(!is_jmp)
|
||||||
o(0xE28FE004); // add lr,pc,#4
|
o(0xE28FE004); // add lr,pc,#4
|
||||||
@ -759,6 +779,9 @@ static void gcall_or_jmp(int is_jmp)
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
/* otherwise, indirect call */
|
/* otherwise, indirect call */
|
||||||
|
#ifdef CONFIG_TCC_BCHECK
|
||||||
|
vtop->r &= ~VT_MUSTBOUND;
|
||||||
|
#endif
|
||||||
r = gv(RC_INT);
|
r = gv(RC_INT);
|
||||||
if(!is_jmp)
|
if(!is_jmp)
|
||||||
o(0xE1A0E00F); // mov lr,pc
|
o(0xE1A0E00F); // mov lr,pc
|
||||||
@ -766,6 +789,121 @@ static void gcall_or_jmp(int is_jmp)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if defined(CONFIG_TCC_BCHECK)
|
||||||
|
|
||||||
|
static void gen_bounds_call(int v)
|
||||||
|
{
|
||||||
|
Sym *sym = external_global_sym(v, &func_old_type);
|
||||||
|
|
||||||
|
greloc(cur_text_section, sym, ind, R_ARM_PC24);
|
||||||
|
o(0xebfffffe);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* generate a bounded pointer addition */
|
||||||
|
ST_FUNC void gen_bounded_ptr_add(void)
|
||||||
|
{
|
||||||
|
vpush_global_sym(&func_old_type, TOK___bound_ptr_add);
|
||||||
|
vrott(3);
|
||||||
|
gfunc_call(2);
|
||||||
|
vpushi(0);
|
||||||
|
/* returned pointer is in REG_IRET */
|
||||||
|
vtop->r = REG_IRET | VT_BOUNDED;
|
||||||
|
if (nocode_wanted)
|
||||||
|
return;
|
||||||
|
/* relocation offset of the bounding function call point */
|
||||||
|
vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(Elf32_Rel));
|
||||||
|
}
|
||||||
|
|
||||||
|
/* patch pointer addition in vtop so that pointer dereferencing is
|
||||||
|
also tested */
|
||||||
|
ST_FUNC void gen_bounded_ptr_deref(void)
|
||||||
|
{
|
||||||
|
addr_t func;
|
||||||
|
int size, align;
|
||||||
|
Elf32_Rel *rel;
|
||||||
|
Sym *sym;
|
||||||
|
|
||||||
|
if (nocode_wanted)
|
||||||
|
return;
|
||||||
|
|
||||||
|
size = type_size(&vtop->type, &align);
|
||||||
|
switch(size) {
|
||||||
|
case 1: func = TOK___bound_ptr_indir1; break;
|
||||||
|
case 2: func = TOK___bound_ptr_indir2; break;
|
||||||
|
case 4: func = TOK___bound_ptr_indir4; break;
|
||||||
|
case 8: func = TOK___bound_ptr_indir8; break;
|
||||||
|
case 12: func = TOK___bound_ptr_indir12; break;
|
||||||
|
case 16: func = TOK___bound_ptr_indir16; break;
|
||||||
|
default:
|
||||||
|
/* may happen with struct member access */
|
||||||
|
return;
|
||||||
|
//tcc_error("unhandled size when dereferencing bounded pointer");
|
||||||
|
//func = 0;
|
||||||
|
//break;
|
||||||
|
}
|
||||||
|
sym = external_global_sym(func, &func_old_type);
|
||||||
|
if (!sym->c)
|
||||||
|
put_extern_sym(sym, NULL, 0, 0);
|
||||||
|
/* patch relocation */
|
||||||
|
/* XXX: find a better solution ? */
|
||||||
|
rel = (Elf32_Rel *)(cur_text_section->reloc->data + vtop->c.i);
|
||||||
|
rel->r_info = ELF32_R_INFO(sym->c, ELF32_R_TYPE(rel->r_info));
|
||||||
|
}
|
||||||
|
|
||||||
|
static void gen_bounds_prolog(void)
|
||||||
|
{
|
||||||
|
/* leave some room for bound checking code */
|
||||||
|
func_bound_offset = lbounds_section->data_offset;
|
||||||
|
func_bound_ind = ind;
|
||||||
|
func_bound_add_epilog = 0;
|
||||||
|
o(0xe1a00000); /* ld r0,lbounds_section->data_offset */
|
||||||
|
o(0xe1a00000);
|
||||||
|
o(0xe1a00000);
|
||||||
|
o(0xe1a00000); /* call __bound_local_new */
|
||||||
|
}
|
||||||
|
|
||||||
|
static void gen_bounds_epilog(void)
|
||||||
|
{
|
||||||
|
addr_t saved_ind;
|
||||||
|
addr_t *bounds_ptr;
|
||||||
|
Sym *sym_data;
|
||||||
|
int offset_modified = func_bound_offset != lbounds_section->data_offset;
|
||||||
|
|
||||||
|
if (!offset_modified && !func_bound_add_epilog)
|
||||||
|
return;
|
||||||
|
|
||||||
|
/* add end of table info */
|
||||||
|
bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
|
||||||
|
*bounds_ptr = 0;
|
||||||
|
|
||||||
|
sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
|
||||||
|
func_bound_offset, lbounds_section->data_offset);
|
||||||
|
|
||||||
|
/* generate bound local allocation */
|
||||||
|
if (offset_modified) {
|
||||||
|
saved_ind = ind;
|
||||||
|
ind = func_bound_ind;
|
||||||
|
o(0xe59f0000); /* ldr r0, [pc] */
|
||||||
|
o(0xea000000); /* b $+4 */
|
||||||
|
greloc(cur_text_section, sym_data, ind, R_ARM_ABS32);
|
||||||
|
o(0x00000000); /* lbounds_section->data_offset */
|
||||||
|
gen_bounds_call(TOK___bound_local_new);
|
||||||
|
ind = saved_ind;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* generate bound check local freeing */
|
||||||
|
o(0xe92d0003); /* push {r0,r1} */
|
||||||
|
o(0xed2d0b02); /* vpush {d0} */
|
||||||
|
o(0xe59f0000); /* ldr r0, [pc] */
|
||||||
|
o(0xea000000); /* b $+4 */
|
||||||
|
greloc(cur_text_section, sym_data, ind, R_ARM_ABS32);
|
||||||
|
o(0x00000000); /* lbounds_section->data_offset */
|
||||||
|
gen_bounds_call(TOK___bound_local_delete);
|
||||||
|
o(0xecbd0b02); /* vpop {d0} */
|
||||||
|
o(0xe8bd0003); /* pop {r0,r1} */
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
static int unalias_ldbl(int btype)
|
static int unalias_ldbl(int btype)
|
||||||
{
|
{
|
||||||
#if LDOUBLE_SIZE == 8
|
#if LDOUBLE_SIZE == 8
|
||||||
@ -956,7 +1094,7 @@ static int assign_regs(int nb_args, int float_abi, struct plan *plan, int *todo)
|
|||||||
|
|
||||||
ncrn = nsaa = 0;
|
ncrn = nsaa = 0;
|
||||||
*todo = 0;
|
*todo = 0;
|
||||||
plan->pplans = tcc_malloc(nb_args * sizeof(*plan->pplans));
|
plan->pplans = nb_args ? tcc_malloc(nb_args * sizeof(*plan->pplans)) : NULL;
|
||||||
memset(plan->clsplans, 0, sizeof(plan->clsplans));
|
memset(plan->clsplans, 0, sizeof(plan->clsplans));
|
||||||
for(i = nb_args; i-- ;) {
|
for(i = nb_args; i-- ;) {
|
||||||
int j, start_vfpreg = 0;
|
int j, start_vfpreg = 0;
|
||||||
@ -1215,10 +1353,16 @@ void gfunc_call(int nb_args)
|
|||||||
int def_float_abi = float_abi;
|
int def_float_abi = float_abi;
|
||||||
int todo;
|
int todo;
|
||||||
struct plan plan;
|
struct plan plan;
|
||||||
|
|
||||||
#ifdef TCC_ARM_EABI
|
#ifdef TCC_ARM_EABI
|
||||||
int variadic;
|
int variadic;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_TCC_BCHECK
|
||||||
|
if (tcc_state->do_bounds_check)
|
||||||
|
gbound_args(nb_args);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef TCC_ARM_EABI
|
||||||
if (float_abi == ARM_HARD_FLOAT) {
|
if (float_abi == ARM_HARD_FLOAT) {
|
||||||
variadic = (vtop[-nb_args].type.ref->f.func_type == FUNC_ELLIPSIS);
|
variadic = (vtop[-nb_args].type.ref->f.func_type == FUNC_ELLIPSIS);
|
||||||
if (variadic || floats_in_core_regs(&vtop[-nb_args]))
|
if (variadic || floats_in_core_regs(&vtop[-nb_args]))
|
||||||
@ -1367,6 +1511,10 @@ from_stack:
|
|||||||
last_itod_magic=0;
|
last_itod_magic=0;
|
||||||
leaffunc = 1;
|
leaffunc = 1;
|
||||||
loc = 0;
|
loc = 0;
|
||||||
|
#ifdef CONFIG_TCC_BCHECK
|
||||||
|
if (tcc_state->do_bounds_check)
|
||||||
|
gen_bounds_prolog();
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
/* generate function epilog */
|
/* generate function epilog */
|
||||||
@ -1374,6 +1522,11 @@ void gfunc_epilog(void)
|
|||||||
{
|
{
|
||||||
uint32_t x;
|
uint32_t x;
|
||||||
int diff;
|
int diff;
|
||||||
|
|
||||||
|
#ifdef CONFIG_TCC_BCHECK
|
||||||
|
if (tcc_state->do_bounds_check)
|
||||||
|
gen_bounds_epilog();
|
||||||
|
#endif
|
||||||
/* Copy float return value to core register if base standard is used and
|
/* Copy float return value to core register if base standard is used and
|
||||||
float computation is made with VFP */
|
float computation is made with VFP */
|
||||||
#if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
|
#if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
|
||||||
@ -1582,10 +1735,10 @@ void gen_opi(int op)
|
|||||||
vswap();
|
vswap();
|
||||||
c=intr(gv(RC_INT));
|
c=intr(gv(RC_INT));
|
||||||
vswap();
|
vswap();
|
||||||
opc=0xE0000000|(opc<<20)|(c<<16);
|
opc=0xE0000000|(opc<<20);
|
||||||
if((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
|
if((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
|
||||||
uint32_t x;
|
uint32_t x;
|
||||||
x=stuff_const(opc|0x2000000,vtop->c.i);
|
x=stuff_const(opc|0x2000000|(c<<16),vtop->c.i);
|
||||||
if(x) {
|
if(x) {
|
||||||
r=intr(vtop[-1].r=get_reg_ex(RC_INT,regmask(vtop[-1].r)));
|
r=intr(vtop[-1].r=get_reg_ex(RC_INT,regmask(vtop[-1].r)));
|
||||||
o(x|(r<<12));
|
o(x|(r<<12));
|
||||||
@ -1593,8 +1746,13 @@ void gen_opi(int op)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
fr=intr(gv(RC_INT));
|
fr=intr(gv(RC_INT));
|
||||||
|
if ((vtop[-1].r & VT_VALMASK) >= VT_CONST) {
|
||||||
|
vswap();
|
||||||
|
c=intr(gv(RC_INT));
|
||||||
|
vswap();
|
||||||
|
}
|
||||||
r=intr(vtop[-1].r=get_reg_ex(RC_INT,two2mask(vtop->r,vtop[-1].r)));
|
r=intr(vtop[-1].r=get_reg_ex(RC_INT,two2mask(vtop->r,vtop[-1].r)));
|
||||||
o(opc|(r<<12)|fr);
|
o(opc|(c<<16)|(r<<12)|fr);
|
||||||
done:
|
done:
|
||||||
vtop--;
|
vtop--;
|
||||||
if (op >= TOK_ULT && op <= TOK_GT)
|
if (op >= TOK_ULT && op <= TOK_GT)
|
||||||
@ -1608,15 +1766,19 @@ done:
|
|||||||
vswap();
|
vswap();
|
||||||
r=intr(gv(RC_INT));
|
r=intr(gv(RC_INT));
|
||||||
vswap();
|
vswap();
|
||||||
opc|=r;
|
|
||||||
if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
|
if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
|
||||||
fr=intr(vtop[-1].r=get_reg_ex(RC_INT,regmask(vtop[-1].r)));
|
fr=intr(vtop[-1].r=get_reg_ex(RC_INT,regmask(vtop[-1].r)));
|
||||||
c = vtop->c.i & 0x1f;
|
c = vtop->c.i & 0x1f;
|
||||||
o(opc|(c<<7)|(fr<<12));
|
o(opc|r|(c<<7)|(fr<<12));
|
||||||
} else {
|
} else {
|
||||||
fr=intr(gv(RC_INT));
|
fr=intr(gv(RC_INT));
|
||||||
|
if ((vtop[-1].r & VT_VALMASK) >= VT_CONST) {
|
||||||
|
vswap();
|
||||||
|
r=intr(gv(RC_INT));
|
||||||
|
vswap();
|
||||||
|
}
|
||||||
c=intr(vtop[-1].r=get_reg_ex(RC_INT,two2mask(vtop->r,vtop[-1].r)));
|
c=intr(vtop[-1].r=get_reg_ex(RC_INT,two2mask(vtop->r,vtop[-1].r)));
|
||||||
o(opc|(c<<12)|(fr<<8)|0x10);
|
o(opc|r|(c<<12)|(fr<<8)|0x10);
|
||||||
}
|
}
|
||||||
vtop--;
|
vtop--;
|
||||||
break;
|
break;
|
||||||
@ -1701,9 +1863,9 @@ void gen_opf(int op)
|
|||||||
vtop--;
|
vtop--;
|
||||||
o(x|0x10000|(vfpr(gv(RC_FLOAT))<<12)); /* fcmp(e)X -> fcmp(e)zX */
|
o(x|0x10000|(vfpr(gv(RC_FLOAT))<<12)); /* fcmp(e)X -> fcmp(e)zX */
|
||||||
} else {
|
} else {
|
||||||
x|=vfpr(gv(RC_FLOAT));
|
gv2(RC_FLOAT,RC_FLOAT);
|
||||||
vswap();
|
x|=vfpr(vtop[0].r);
|
||||||
o(x|(vfpr(gv(RC_FLOAT))<<12));
|
o(x|(vfpr(vtop[-1].r) << 12));
|
||||||
vtop--;
|
vtop--;
|
||||||
}
|
}
|
||||||
o(0xEEF1FA10); /* fmstat */
|
o(0xEEF1FA10); /* fmstat */
|
||||||
@ -1726,6 +1888,12 @@ void gen_opf(int op)
|
|||||||
r2=gv(RC_FLOAT);
|
r2=gv(RC_FLOAT);
|
||||||
x|=vfpr(r2)<<16;
|
x|=vfpr(r2)<<16;
|
||||||
r|=regmask(r2);
|
r|=regmask(r2);
|
||||||
|
if ((vtop[-1].r & VT_VALMASK) >= VT_CONST) {
|
||||||
|
vswap();
|
||||||
|
r=gv(RC_FLOAT);
|
||||||
|
vswap();
|
||||||
|
x=(x&~0xf)|vfpr(r);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
vtop->r=get_reg_ex(RC_FLOAT,r);
|
vtop->r=get_reg_ex(RC_FLOAT,r);
|
||||||
if(!fneg)
|
if(!fneg)
|
||||||
@ -1808,6 +1976,11 @@ void gen_opf(int op)
|
|||||||
r2=c2&0xf;
|
r2=c2&0xf;
|
||||||
} else {
|
} else {
|
||||||
r2=fpr(gv(RC_FLOAT));
|
r2=fpr(gv(RC_FLOAT));
|
||||||
|
if ((vtop[-1].r & VT_VALMASK) >= VT_CONST) {
|
||||||
|
vswap();
|
||||||
|
r=fpr(gv(RC_FLOAT));
|
||||||
|
vswap();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case '-':
|
case '-':
|
||||||
@ -1829,6 +2002,11 @@ void gen_opf(int op)
|
|||||||
r=fpr(gv(RC_FLOAT));
|
r=fpr(gv(RC_FLOAT));
|
||||||
vswap();
|
vswap();
|
||||||
r2=fpr(gv(RC_FLOAT));
|
r2=fpr(gv(RC_FLOAT));
|
||||||
|
if ((vtop[-1].r & VT_VALMASK) >= VT_CONST) {
|
||||||
|
vswap();
|
||||||
|
r=fpr(gv(RC_FLOAT));
|
||||||
|
vswap();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case '*':
|
case '*':
|
||||||
@ -1841,8 +2019,14 @@ void gen_opf(int op)
|
|||||||
vswap();
|
vswap();
|
||||||
if(c2 && c2<=0xf)
|
if(c2 && c2<=0xf)
|
||||||
r2=c2;
|
r2=c2;
|
||||||
else
|
else {
|
||||||
r2=fpr(gv(RC_FLOAT));
|
r2=fpr(gv(RC_FLOAT));
|
||||||
|
if ((vtop[-1].r & VT_VALMASK) >= VT_CONST) {
|
||||||
|
vswap();
|
||||||
|
r=fpr(gv(RC_FLOAT));
|
||||||
|
vswap();
|
||||||
|
}
|
||||||
|
}
|
||||||
x|=0x100000; // muf
|
x|=0x100000; // muf
|
||||||
break;
|
break;
|
||||||
case '/':
|
case '/':
|
||||||
@ -1863,6 +2047,11 @@ void gen_opf(int op)
|
|||||||
r=fpr(gv(RC_FLOAT));
|
r=fpr(gv(RC_FLOAT));
|
||||||
vswap();
|
vswap();
|
||||||
r2=fpr(gv(RC_FLOAT));
|
r2=fpr(gv(RC_FLOAT));
|
||||||
|
if ((vtop[-1].r & VT_VALMASK) >= VT_CONST) {
|
||||||
|
vswap();
|
||||||
|
r=fpr(gv(RC_FLOAT));
|
||||||
|
vswap();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
@ -1915,6 +2104,11 @@ void gen_opf(int op)
|
|||||||
r2=c2&0xf;
|
r2=c2&0xf;
|
||||||
} else {
|
} else {
|
||||||
r2=fpr(gv(RC_FLOAT));
|
r2=fpr(gv(RC_FLOAT));
|
||||||
|
if ((vtop[-1].r & VT_VALMASK) >= VT_CONST) {
|
||||||
|
vswap();
|
||||||
|
r=fpr(gv(RC_FLOAT));
|
||||||
|
vswap();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
--vtop;
|
--vtop;
|
||||||
vset_VT_CMP(op);
|
vset_VT_CMP(op);
|
||||||
|
arm-link.c:
@@ -155,7 +155,7 @@ ST_FUNC void relocate_plt(TCCState *s1)
         while (p < p_end) {
             if (read32le(p) == 0x46c04778) /* PLT Thumb stub present */
                 p += 4;
-            add32le(p + 12, x + s1->plt->data - p);
+            add32le(p + 12, x + (s1->plt->data - p));
             p += 16;
         }
     }
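The gen_bounds_prolog()/gen_bounds_epilog() pairs added in arm-gen.c above (and in arm64-gen.c and riscv64-gen.c below) all reserve placeholder instructions in the prolog and patch them from the epilog once it is known whether the function registered any local bounds. The toy program below only sketches that reserve-then-patch idea, reusing the ARM opcodes quoted in the diff; the helper names are illustrative, not the commit's.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static unsigned char code[64];   /* stand-in for cur_text_section->data */
static size_t ind;               /* stand-in for the output index 'ind' */

static void o32(uint32_t v) { memcpy(code + ind, &v, 4); ind += 4; }

int main(void)
{
    size_t func_bound_ind, saved_ind;
    int locals_registered;
    uint32_t first;

    /* prolog: reserve room with filler (ARM uses 0xe1a00000, i.e. "mov r0,r0") */
    func_bound_ind = ind;
    o32(0xe1a00000);
    o32(0xe1a00000);

    locals_registered = 1;        /* pretend a bounded local array was seen later */

    /* epilog: rewrite the placeholders only if something was registered */
    if (locals_registered) {
        saved_ind = ind;
        ind = func_bound_ind;
        o32(0xe59f0000);          /* ldr r0, [pc]  - load bounds table pointer */
        o32(0xebfffffe);          /* bl ...        - call __bound_local_new (reloc) */
        ind = saved_ind;
    }

    memcpy(&first, code, 4);
    printf("first word: 0x%08x\n", (unsigned)first);
    return 0;
}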
arm64-gen.c | 171
@ -81,6 +81,12 @@ ST_DATA const int reg_classes[NB_REGS] = {
|
|||||||
RC_FLOAT | RC_F(7)
|
RC_FLOAT | RC_F(7)
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#if defined(CONFIG_TCC_BCHECK)
|
||||||
|
static addr_t func_bound_offset;
|
||||||
|
static unsigned long func_bound_ind;
|
||||||
|
static int func_bound_add_epilog;
|
||||||
|
#endif
|
||||||
|
|
||||||
#define IS_FREG(x) ((x) >= TREG_F(0))
|
#define IS_FREG(x) ((x) >= TREG_F(0))
|
||||||
|
|
||||||
static uint32_t intr(int r)
|
static uint32_t intr(int r)
|
||||||
@ -454,7 +460,7 @@ static void arm64_load_cmp(int r, SValue *sv);
|
|||||||
ST_FUNC void load(int r, SValue *sv)
|
ST_FUNC void load(int r, SValue *sv)
|
||||||
{
|
{
|
||||||
int svtt = sv->type.t;
|
int svtt = sv->type.t;
|
||||||
int svr = sv->r;
|
int svr = sv->r & ~VT_BOUNDED;
|
||||||
int svrv = svr & VT_VALMASK;
|
int svrv = svr & VT_VALMASK;
|
||||||
uint64_t svcul = (uint32_t)sv->c.i;
|
uint64_t svcul = (uint32_t)sv->c.i;
|
||||||
svcul = svcul >> 31 & 1 ? svcul - ((uint64_t)1 << 32) : svcul;
|
svcul = svcul >> 31 & 1 ? svcul - ((uint64_t)1 << 32) : svcul;
|
||||||
@ -554,7 +560,7 @@ ST_FUNC void load(int r, SValue *sv)
|
|||||||
ST_FUNC void store(int r, SValue *sv)
|
ST_FUNC void store(int r, SValue *sv)
|
||||||
{
|
{
|
||||||
int svtt = sv->type.t;
|
int svtt = sv->type.t;
|
||||||
int svr = sv->r;
|
int svr = sv->r & ~VT_BOUNDED;
|
||||||
int svrv = svr & VT_VALMASK;
|
int svrv = svr & VT_VALMASK;
|
||||||
uint64_t svcul = (uint32_t)sv->c.i;
|
uint64_t svcul = (uint32_t)sv->c.i;
|
||||||
svcul = svcul >> 31 & 1 ? svcul - ((uint64_t)1 << 32) : svcul;
|
svcul = svcul >> 31 & 1 ? svcul - ((uint64_t)1 << 32) : svcul;
|
||||||
@ -594,11 +600,147 @@ static void arm64_gen_bl_or_b(int b)
|
|||||||
assert(!b);
|
assert(!b);
|
||||||
greloca(cur_text_section, vtop->sym, ind, R_AARCH64_CALL26, 0);
|
greloca(cur_text_section, vtop->sym, ind, R_AARCH64_CALL26, 0);
|
||||||
o(0x94000000); // bl .
|
o(0x94000000); // bl .
|
||||||
|
#ifdef CONFIG_TCC_BCHECK
|
||||||
|
if (tcc_state->do_bounds_check &&
|
||||||
|
(vtop->sym->v == TOK_setjmp ||
|
||||||
|
vtop->sym->v == TOK__setjmp ||
|
||||||
|
vtop->sym->v == TOK_sigsetjmp ||
|
||||||
|
vtop->sym->v == TOK___sigsetjmp))
|
||||||
|
func_bound_add_epilog = 1;
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
else
|
else {
|
||||||
|
#ifdef CONFIG_TCC_BCHECK
|
||||||
|
vtop->r &= ~VT_MUSTBOUND;
|
||||||
|
#endif
|
||||||
o(0xd61f0000 | (uint32_t)!b << 21 | intr(gv(RC_R30)) << 5); // br/blr
|
o(0xd61f0000 | (uint32_t)!b << 21 | intr(gv(RC_R30)) << 5); // br/blr
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if defined(CONFIG_TCC_BCHECK)
|
||||||
|
|
||||||
|
static void gen_bounds_call(int v)
|
||||||
|
{
|
||||||
|
Sym *sym = external_global_sym(v, &func_old_type);
|
||||||
|
|
||||||
|
greloca(cur_text_section, sym, ind, R_AARCH64_CALL26, 0);
|
||||||
|
o(0x94000000); // bl
|
||||||
|
}
|
||||||
|
|
||||||
|
/* generate a bounded pointer addition */
|
||||||
|
ST_FUNC void gen_bounded_ptr_add(void)
|
||||||
|
{
|
||||||
|
vpush_global_sym(&func_old_type, TOK___bound_ptr_add);
|
||||||
|
vrott(3);
|
||||||
|
gfunc_call(2);
|
||||||
|
vpushi(0);
|
||||||
|
/* returned pointer is in REG_IRET */
|
||||||
|
vtop->r = REG_IRET | VT_BOUNDED;
|
||||||
|
if (nocode_wanted)
|
||||||
|
return;
|
||||||
|
/* relocation offset of the bounding function call point */
|
||||||
|
vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
|
||||||
|
}
|
||||||
|
|
||||||
|
/* patch pointer addition in vtop so that pointer dereferencing is
|
||||||
|
also tested */
|
||||||
|
ST_FUNC void gen_bounded_ptr_deref(void)
|
||||||
|
{
|
||||||
|
addr_t func;
|
||||||
|
int size, align;
|
||||||
|
ElfW(Rela) *rel;
|
||||||
|
Sym *sym;
|
||||||
|
|
||||||
|
if (nocode_wanted)
|
||||||
|
return;
|
||||||
|
|
||||||
|
size = type_size(&vtop->type, &align);
|
||||||
|
switch(size) {
|
||||||
|
case 1: func = TOK___bound_ptr_indir1; break;
|
||||||
|
case 2: func = TOK___bound_ptr_indir2; break;
|
||||||
|
case 4: func = TOK___bound_ptr_indir4; break;
|
||||||
|
case 8: func = TOK___bound_ptr_indir8; break;
|
||||||
|
case 12: func = TOK___bound_ptr_indir12; break;
|
||||||
|
case 16: func = TOK___bound_ptr_indir16; break;
|
||||||
|
default:
|
||||||
|
/* may happen with struct member access */
|
||||||
|
return;
|
||||||
|
//tcc_error("unhandled size when dereferencing bounded pointer");
|
||||||
|
//func = 0;
|
||||||
|
//break;
|
||||||
|
}
|
||||||
|
sym = external_global_sym(func, &func_old_type);
|
||||||
|
if (!sym->c)
|
||||||
|
put_extern_sym(sym, NULL, 0, 0);
|
||||||
|
/* patch relocation */
|
||||||
|
/* XXX: find a better solution ? */
|
||||||
|
rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
|
||||||
|
rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
|
||||||
|
}
|
||||||
|
|
||||||
|
static void gen_bounds_prolog(void)
|
||||||
|
{
|
||||||
|
/* leave some room for bound checking code */
|
||||||
|
func_bound_offset = lbounds_section->data_offset;
|
||||||
|
func_bound_ind = ind;
|
||||||
|
func_bound_add_epilog = 0;
|
||||||
|
o(0xd503201f); /* nop -> mov x0,#0,lsl #0, lbound section pointer */
|
||||||
|
o(0xd503201f);
|
||||||
|
o(0xd503201f);
|
||||||
|
o(0xd503201f);
|
||||||
|
o(0xd503201f); /* nop -> call __bound_local_new */
|
||||||
|
}
|
||||||
|
|
||||||
|
static void gen_bounds_epilog(void)
|
||||||
|
{
|
||||||
|
addr_t saved_ind;
|
||||||
|
addr_t *bounds_ptr;
|
||||||
|
Sym *sym_data;
|
||||||
|
int offset_modified = func_bound_offset != lbounds_section->data_offset;
|
||||||
|
|
||||||
|
if (!offset_modified && !func_bound_add_epilog)
|
||||||
|
return;
|
||||||
|
|
||||||
|
/* add end of table info */
|
||||||
|
bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
|
||||||
|
*bounds_ptr = 0;
|
||||||
|
|
||||||
|
sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
|
||||||
|
func_bound_offset, lbounds_section->data_offset);
|
||||||
|
|
||||||
|
/* generate bound local allocation */
|
||||||
|
if (offset_modified) {
|
||||||
|
saved_ind = ind;
|
||||||
|
ind = func_bound_ind;
|
||||||
|
greloca(cur_text_section, sym_data, ind, R_AARCH64_MOVW_UABS_G0_NC, 0);
|
||||||
|
o(0xd2800000); /* mov x0,#0,lsl #0, lbound section pointer */
|
||||||
|
greloca(cur_text_section, sym_data, ind, R_AARCH64_MOVW_UABS_G1_NC, 0);
|
||||||
|
o(0xf2a00000); /* movk x0,#0,lsl #16 */
|
||||||
|
greloca(cur_text_section, sym_data, ind, R_AARCH64_MOVW_UABS_G2_NC, 0);
|
||||||
|
o(0xf2c00000); /* movk x0,#0,lsl #32 */
|
||||||
|
greloca(cur_text_section, sym_data, ind, R_AARCH64_MOVW_UABS_G3, 0);
|
||||||
|
o(0xf2e00000); /* movk x0,#0,lsl #48 */
|
||||||
|
gen_bounds_call(TOK___bound_local_new);
|
||||||
|
ind = saved_ind;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* generate bound check local freeing */
|
||||||
|
o(0xf81f0fe0); /* str x0, [sp, #-16]! */
|
||||||
|
o(0x3c9f0fe0); /* str q0, [sp, #-16]! */
|
||||||
|
greloca(cur_text_section, sym_data, ind, R_AARCH64_MOVW_UABS_G0_NC, 0);
|
||||||
|
o(0xd2800000); // mov x0,#0,lsl #0
|
||||||
|
greloca(cur_text_section, sym_data, ind, R_AARCH64_MOVW_UABS_G1_NC, 0);
|
||||||
|
o(0xf2a00000); // movk x0,#0,lsl #16
|
||||||
|
greloca(cur_text_section, sym_data, ind, R_AARCH64_MOVW_UABS_G2_NC, 0);
|
||||||
|
o(0xf2c00000); // movk x0,#0,lsl #32
|
||||||
|
greloca(cur_text_section, sym_data, ind, R_AARCH64_MOVW_UABS_G3, 0);
|
||||||
|
o(0xf2e00000); // movk x0,#0,lsl #48
|
||||||
|
gen_bounds_call(TOK___bound_local_delete);
|
||||||
|
o(0x3cc107e0); /* ldr q0, [sp], #16 */
|
||||||
|
o(0xf84107e0); /* ldr x0, [sp], #16 */
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
static int arm64_hfa_aux(CType *type, int *fsize, int num)
|
static int arm64_hfa_aux(CType *type, int *fsize, int num)
|
||||||
{
|
{
|
||||||
if (is_float(type->t)) {
|
if (is_float(type->t)) {
|
||||||
@ -642,7 +784,7 @@ static int arm64_hfa_aux(CType *type, int *fsize, int num)
|
|||||||
return num;
|
return num;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else if (type->t & VT_ARRAY) {
|
else if ((type->t & VT_ARRAY) && ((type->t & VT_BTYPE) != VT_PTR)) {
|
||||||
int num1;
|
int num1;
|
||||||
if (!type->ref->c)
|
if (!type->ref->c)
|
||||||
return num;
|
return num;
|
||||||
@ -659,7 +801,8 @@ static int arm64_hfa_aux(CType *type, int *fsize, int num)
|
|||||||
|
|
||||||
static int arm64_hfa(CType *type, int *fsize)
|
static int arm64_hfa(CType *type, int *fsize)
|
||||||
{
|
{
|
||||||
if ((type->t & VT_BTYPE) == VT_STRUCT || (type->t & VT_ARRAY)) {
|
if ((type->t & VT_BTYPE) == VT_STRUCT ||
|
||||||
|
((type->t & VT_ARRAY) && ((type->t & VT_BTYPE) != VT_PTR))) {
|
||||||
int sz = 0;
|
int sz = 0;
|
||||||
int n = arm64_hfa_aux(type, &sz, 0);
|
int n = arm64_hfa_aux(type, &sz, 0);
|
||||||
if (0 < n && n <= 4) {
|
if (0 < n && n <= 4) {
|
||||||
@ -839,6 +982,11 @@ ST_FUNC void gfunc_call(int nb_args)
|
|||||||
unsigned long stack;
|
unsigned long stack;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
|
#ifdef CONFIG_TCC_BCHECK
|
||||||
|
if (tcc_state->do_bounds_check)
|
||||||
|
gbound_args(nb_args);
|
||||||
|
#endif
|
||||||
|
|
||||||
return_type = &vtop[-nb_args].type.ref->type;
|
return_type = &vtop[-nb_args].type.ref->type;
|
||||||
if ((return_type->t & VT_BTYPE) == VT_STRUCT)
|
if ((return_type->t & VT_BTYPE) == VT_STRUCT)
|
||||||
--nb_args;
|
--nb_args;
|
||||||
@ -1014,8 +1162,8 @@ ST_FUNC void gfunc_prolog(Sym *func_sym)
|
|||||||
|
|
||||||
for (sym = func_type->ref; sym; sym = sym->next)
|
for (sym = func_type->ref; sym; sym = sym->next)
|
||||||
++n;
|
++n;
|
||||||
t = tcc_malloc(n * sizeof(*t));
|
t = n ? tcc_malloc(n * sizeof(*t)) : NULL;
|
||||||
a = tcc_malloc(n * sizeof(*a));
|
a = n ? tcc_malloc(n * sizeof(*a)) : NULL;
|
||||||
|
|
||||||
for (sym = func_type->ref; sym; sym = sym->next)
|
for (sym = func_type->ref; sym; sym = sym->next)
|
||||||
t[i++] = &sym->type;
|
t[i++] = &sym->type;
|
||||||
@ -1076,6 +1224,10 @@ ST_FUNC void gfunc_prolog(Sym *func_sym)
|
|||||||
o(0xd503201f); // nop
|
o(0xd503201f); // nop
|
||||||
o(0xd503201f); // nop
|
o(0xd503201f); // nop
|
||||||
loc = 0;
|
loc = 0;
|
||||||
|
#ifdef CONFIG_TCC_BCHECK
|
||||||
|
if (tcc_state->do_bounds_check)
|
||||||
|
gen_bounds_prolog();
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
ST_FUNC void gen_va_start(void)
|
ST_FUNC void gen_va_start(void)
|
||||||
@ -1246,6 +1398,11 @@ ST_FUNC void gfunc_return(CType *func_type)
|
|||||||
|
|
||||||
ST_FUNC void gfunc_epilog(void)
|
ST_FUNC void gfunc_epilog(void)
|
||||||
{
|
{
|
||||||
|
#ifdef CONFIG_TCC_BCHECK
|
||||||
|
if (tcc_state->do_bounds_check)
|
||||||
|
gen_bounds_epilog();
|
||||||
|
#endif
|
||||||
|
|
||||||
if (loc) {
|
if (loc) {
|
||||||
// Insert instructions to subtract size of stack frame from SP.
|
// Insert instructions to subtract size of stack frame from SP.
|
||||||
unsigned char *ptr = cur_text_section->data + arm64_func_sub_sp_offset;
|
unsigned char *ptr = cur_text_section->data + arm64_func_sub_sp_offset;
|
||||||
|
arm64-link.c | 10
@@ -37,6 +37,7 @@ int code_reloc (int reloc_type)
         case R_AARCH64_ADD_ABS_LO12_NC:
         case R_AARCH64_ADR_GOT_PAGE:
         case R_AARCH64_LD64_GOT_LO12_NC:
+        case R_AARCH64_LDST64_ABS_LO12_NC:
         case R_AARCH64_GLOB_DAT:
         case R_AARCH64_COPY:
             return 0;
@@ -62,6 +63,7 @@ int gotplt_entry_type (int reloc_type)
         case R_AARCH64_MOVW_UABS_G3:
         case R_AARCH64_ADR_PREL_PG_HI21:
         case R_AARCH64_ADD_ABS_LO12_NC:
+        case R_AARCH64_LDST64_ABS_LO12_NC:
         case R_AARCH64_GLOB_DAT:
         case R_AARCH64_JUMP_SLOT:
         case R_AARCH64_COPY:
@@ -157,10 +159,10 @@ void relocate(TCCState *s1, ElfW_Rel *rel, int type, unsigned char *ptr, addr_t
 
     switch(type) {
         case R_AARCH64_ABS64:
-            write64le(ptr, val);
+            add64le(ptr, val);
             return;
         case R_AARCH64_ABS32:
-            write32le(ptr, val);
+            add32le(ptr, val);
             return;
         case R_AARCH64_PREL32:
             write32le(ptr, val - addr);
@@ -193,6 +195,10 @@ void relocate(TCCState *s1, ElfW_Rel *rel, int type, unsigned char *ptr, addr_t
             write32le(ptr, ((read32le(ptr) & 0xffc003ff) |
                             (val & 0xfff) << 10));
             return;
+        case R_AARCH64_LDST64_ABS_LO12_NC:
+            write32le(ptr, ((read32le(ptr) & 0xffc003ff) |
+                            (val & 0xff8) << 7));
+            return;
         case R_AARCH64_JUMP26:
         case R_AARCH64_CALL26:
 #ifdef DEBUG_RELOC
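The new R_AARCH64_LDST64_ABS_LO12_NC case writes the low 12 bits of the symbol address into the scaled immediate field (bits 21..10) of a 64-bit LDR/STR; since the offset is scaled by 8, (val & 0xff8) << 7 is the same as ((val & 0xfff) >> 3) << 10. Below is a stand-alone replica of that patch, using an assumed example instruction:

#include <stdint.h>
#include <stdio.h>

static uint32_t patch_ldst64_lo12(uint32_t insn, uint64_t val)
{
    /* clear bits 21..10, then insert the scaled low-12-bit offset */
    return (insn & 0xffc003ff) | ((uint32_t)(val & 0xff8) << 7);
}

int main(void)
{
    uint32_t ldr_x0 = 0xf9400000;  /* ldr x0, [x0, #0] */
    /* address low bits 0x238 -> imm12 field becomes 0x238 / 8 = 0x47 */
    printf("0x%08x\n", patch_ldst64_lo12(ldr_x0, 0x1238));
    return 0;
}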
i386-gen.c:
@@ -1089,8 +1089,9 @@ static void gen_bounds_epilog(void)
     addr_t saved_ind;
     addr_t *bounds_ptr;
     Sym *sym_data;
+    int offset_modified = func_bound_offset != lbounds_section->data_offset;
 
-    if (func_bound_offset == lbounds_section->data_offset && !func_bound_add_epilog)
+    if (!offset_modified && !func_bound_add_epilog)
         return;
 
     /* add end of table info */
@@ -1101,7 +1102,7 @@ static void gen_bounds_epilog(void)
                            func_bound_offset, lbounds_section->data_offset);
 
     /* generate bound local allocation */
-    if (func_bound_offset != lbounds_section->data_offset) {
+    if (offset_modified) {
         saved_ind = ind;
         ind = func_bound_ind;
         greloc(cur_text_section, sym_data, ind + 1, R_386_32);
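The point of caching offset_modified is that section_ptr_add() grows lbounds_section->data_offset before the second comparison, so re-testing there could no longer tell "locals were registered" apart from "epilog forced by setjmp". A minimal illustration of the hazard, with illustrative names only:

#include <stdio.h>

static unsigned long data_offset;        /* grows as table entries are appended */
static unsigned long func_bound_offset;  /* snapshot taken in the prolog */

static void append_end_marker(void) { data_offset += 8; }  /* like section_ptr_add() */

int main(void)
{
    func_bound_offset = data_offset;     /* prolog: nothing registered yet */

    /* capture the comparison BEFORE the epilog appends the terminator */
    int offset_modified = func_bound_offset != data_offset;

    append_end_marker();                 /* epilog always appends the end-of-table entry */

    /* Re-comparing here would now always report a difference; the cached
       flag still correctly says no local bounds were created. */
    printf("offset_modified = %d\n", offset_modified);
    return 0;
}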
lib/Makefile | 12
@@ -43,9 +43,9 @@ endif
 
 I386_O = libtcc1.o alloca86.o alloca86-bt.o $(BT_O)
 X86_64_O = libtcc1.o alloca86_64.o alloca86_64-bt.o $(BT_O)
-ARM_O = libtcc1.o armeabi.o alloca-arm.o armflush.o $(BT_O)
+ARM_O = libtcc1.o armeabi.o alloca-arm.o armflush.o fetch_and_add_arm.o $(BT_O)
-ARM64_O = lib-arm64.o $(BT_O)
+ARM64_O = lib-arm64.o fetch_and_add_arm64.o $(BT_O)
-RISCV64_O = lib-arm64.o $(BT_O)
+RISCV64_O = lib-arm64.o fetch_and_add_riscv64.o $(BT_O)
 WIN_O = crt1.o crt1w.o wincrt1.o wincrt1w.o dllcrt1.o dllmain.o
 
 OBJ-i386 = $(I386_O) $(BCHECK_O) $(DSO_O)
@@ -53,15 +53,15 @@ OBJ-x86_64 = $(X86_64_O) va_list.o $(BCHECK_O) $(DSO_O)
 OBJ-x86_64-osx = $(X86_64_O) va_list.o
 OBJ-i386-win32 = $(I386_O) chkstk.o $(B_O) $(WIN_O)
 OBJ-x86_64-win32 = $(X86_64_O) chkstk.o $(B_O) $(WIN_O)
-OBJ-arm64 = $(ARM64_O) $(DSO_O)
+OBJ-arm64 = $(ARM64_O) $(BCHECK_O) $(DSO_O)
-OBJ-arm = $(ARM_O) $(DSO_O)
+OBJ-arm = $(ARM_O) $(BCHECK_O) $(DSO_O)
 OBJ-arm-fpa = $(ARM_O) $(DSO_O)
 OBJ-arm-fpa-ld = $(ARM_O) $(DSO_O)
 OBJ-arm-vfp = $(ARM_O) $(DSO_O)
 OBJ-arm-eabi = $(ARM_O) $(DSO_O)
 OBJ-arm-eabihf = $(ARM_O) $(DSO_O)
 OBJ-arm-wince = $(ARM_O) $(WIN_O)
-OBJ-riscv64 = $(RISCV64_O) $(DSO_O)
+OBJ-riscv64 = $(RISCV64_O) $(BCHECK_O) $(DSO_O)
 
 OBJ-extra = $(filter $(B_O),$(OBJ-$T))
 OBJ-libtcc1 = $(addprefix $(X),$(filter-out $(OBJ-extra),$(OBJ-$T)))
lib/bcheck.c
97
lib/bcheck.c
@ -163,7 +163,7 @@ typedef struct alloca_list_struct {
|
|||||||
#if defined(_WIN32)
|
#if defined(_WIN32)
|
||||||
#define BOUND_TID_TYPE DWORD
|
#define BOUND_TID_TYPE DWORD
|
||||||
#define BOUND_GET_TID GetCurrentThreadId()
|
#define BOUND_GET_TID GetCurrentThreadId()
|
||||||
#elif defined(__i386__) || defined(__x86_64__)
|
#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || defined(__riscv)
|
||||||
#define BOUND_TID_TYPE pid_t
|
#define BOUND_TID_TYPE pid_t
|
||||||
#define BOUND_GET_TID syscall (SYS_gettid)
|
#define BOUND_GET_TID syscall (SYS_gettid)
|
||||||
#else
|
#else
|
||||||
@ -223,6 +223,19 @@ DLL_EXPORT char *__bound_strcat(char *dest, const char *src);
|
|||||||
DLL_EXPORT char *__bound_strchr(const char *string, int ch);
|
DLL_EXPORT char *__bound_strchr(const char *string, int ch);
|
||||||
DLL_EXPORT char *__bound_strdup(const char *s);
|
DLL_EXPORT char *__bound_strdup(const char *s);
|
||||||
|
|
||||||
|
#if defined(__arm__)
|
||||||
|
DLL_EXPORT void *__bound___aeabi_memcpy(void *dst, const void *src, size_t size);
|
||||||
|
DLL_EXPORT void *__bound___aeabi_memmove(void *dst, const void *src, size_t size);
|
||||||
|
DLL_EXPORT void *__bound___aeabi_memmove4(void *dst, const void *src, size_t size);
|
||||||
|
DLL_EXPORT void *__bound___aeabi_memmove8(void *dst, const void *src, size_t size);
|
||||||
|
DLL_EXPORT void *__bound___aeabi_memset(void *dst, int c, size_t size);
|
||||||
|
DLL_EXPORT void *__aeabi_memcpy(void *dst, const void *src, size_t size);
|
||||||
|
DLL_EXPORT void *__aeabi_memmove(void *dst, const void *src, size_t size);
|
||||||
|
DLL_EXPORT void *__aeabi_memmove4(void *dst, const void *src, size_t size);
|
||||||
|
DLL_EXPORT void *__aeabi_memmove8(void *dst, const void *src, size_t size);
|
||||||
|
DLL_EXPORT void *__aeabi_memset(void *dst, int c, size_t size);
|
||||||
|
#endif
|
||||||
|
|
||||||
#if MALLOC_REDIR
|
#if MALLOC_REDIR
|
||||||
#define BOUND_MALLOC(a) malloc_redir(a)
|
#define BOUND_MALLOC(a) malloc_redir(a)
|
||||||
#define BOUND_MEMALIGN(a,b) memalign_redir(a,b)
|
#define BOUND_MEMALIGN(a,b) memalign_redir(a,b)
|
||||||
@ -320,6 +333,15 @@ static void fetch_and_add(signed char* variable, signed char value)
|
|||||||
: // No input-only
|
: // No input-only
|
||||||
: "memory"
|
: "memory"
|
||||||
);
|
);
|
||||||
|
#elif defined __arm__
|
||||||
|
extern fetch_and_add_arm(signed char* variable, signed char value);
|
||||||
|
fetch_and_add_arm(variable, value);
|
||||||
|
#elif defined __aarch64__
|
||||||
|
extern fetch_and_add_arm64(signed char* variable, signed char value);
|
||||||
|
fetch_and_add_arm64(variable, value);
|
||||||
|
#elif defined __riscv
|
||||||
|
extern fetch_and_add_riscv64(signed char* variable, signed char value);
|
||||||
|
fetch_and_add_riscv64(variable, value);
|
||||||
#else
|
#else
|
||||||
*variable += value;
|
*variable += value;
|
||||||
#endif
|
#endif
|
||||||
@ -814,10 +836,10 @@ void __bound_init(size_t *p)
|
|||||||
{
|
{
|
||||||
FILE *fp;
|
FILE *fp;
|
||||||
unsigned char found;
|
unsigned char found;
|
||||||
unsigned long long start;
|
unsigned long start;
|
||||||
unsigned long long end;
|
unsigned long end;
|
||||||
unsigned long long ad =
|
unsigned long ad =
|
||||||
(unsigned long long) __builtin_return_address(0);
|
(unsigned long) __builtin_return_address(0);
|
||||||
char line[1000];
|
char line[1000];
|
||||||
|
|
||||||
/* Display exec name. Usefull when a lot of code is compiled with tcc */
|
/* Display exec name. Usefull when a lot of code is compiled with tcc */
|
||||||
@ -835,7 +857,7 @@ void __bound_init(size_t *p)
|
|||||||
fp = fopen ("/proc/self/maps", "r");
|
fp = fopen ("/proc/self/maps", "r");
|
||||||
if (fp) {
|
if (fp) {
|
||||||
while (fgets (line, sizeof(line), fp)) {
|
while (fgets (line, sizeof(line), fp)) {
|
||||||
if (sscanf (line, "%Lx-%Lx", &start, &end) == 2 &&
|
if (sscanf (line, "%lx-%lx", &start, &end) == 2 &&
|
||||||
ad >= start && ad < end) {
|
ad >= start && ad < end) {
|
||||||
found = 1;
|
found = 1;
|
||||||
break;
|
break;
|
||||||
@ -1099,7 +1121,7 @@ void *__bound_malloc(size_t size, const void *caller)
|
|||||||
INCR_COUNT(bound_malloc_count);
|
INCR_COUNT(bound_malloc_count);
|
||||||
|
|
||||||
if (ptr) {
|
if (ptr) {
|
||||||
tree = splay_insert ((size_t) ptr, size, tree);
|
tree = splay_insert ((size_t) ptr, size ? size : size + 1, tree);
|
||||||
if (tree && tree->start == (size_t) ptr)
|
if (tree && tree->start == (size_t) ptr)
|
||||||
tree->type = TCC_TYPE_MALLOC;
|
tree->type = TCC_TYPE_MALLOC;
|
||||||
}
|
}
|
||||||
@ -1138,7 +1160,7 @@ void *__bound_memalign(size_t size, size_t align, const void *caller)
|
|||||||
INCR_COUNT(bound_memalign_count);
|
INCR_COUNT(bound_memalign_count);
|
||||||
|
|
||||||
if (ptr) {
|
if (ptr) {
|
||||||
tree = splay_insert((size_t) ptr, size, tree);
|
tree = splay_insert((size_t) ptr, size ? size : size + 1, tree);
|
||||||
if (tree && tree->start == (size_t) ptr)
|
if (tree && tree->start == (size_t) ptr)
|
||||||
tree->type = TCC_TYPE_MEMALIGN;
|
tree->type = TCC_TYPE_MEMALIGN;
|
||||||
}
|
}
|
||||||
@ -1205,7 +1227,7 @@ void *__bound_realloc(void *ptr, size_t size, const void *caller)
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
new_ptr = BOUND_REALLOC (ptr, size);
|
new_ptr = BOUND_REALLOC (ptr, size + 1);
|
||||||
dprintf(stderr, "%s, %s(): %p, 0x%lx\n",
|
dprintf(stderr, "%s, %s(): %p, 0x%lx\n",
|
||||||
__FILE__, __FUNCTION__, new_ptr, (unsigned long)size);
|
__FILE__, __FUNCTION__, new_ptr, (unsigned long)size);
|
||||||
|
|
||||||
@ -1216,7 +1238,7 @@ void *__bound_realloc(void *ptr, size_t size, const void *caller)
|
|||||||
if (ptr)
|
if (ptr)
|
||||||
tree = splay_delete ((size_t) ptr, tree);
|
tree = splay_delete ((size_t) ptr, tree);
|
||||||
if (new_ptr) {
|
if (new_ptr) {
|
||||||
tree = splay_insert ((size_t) new_ptr, size, tree);
|
tree = splay_insert ((size_t) new_ptr, size ? size : size + 1, tree);
|
||||||
if (tree && tree->start == (size_t) new_ptr)
|
if (tree && tree->start == (size_t) new_ptr)
|
||||||
tree->type = TCC_TYPE_REALLOC;
|
tree->type = TCC_TYPE_REALLOC;
|
||||||
}
|
}
|
||||||
@ -1259,7 +1281,7 @@ void *__bound_calloc(size_t nmemb, size_t size)
|
|||||||
if (no_checking == 0) {
|
if (no_checking == 0) {
|
||||||
WAIT_SEM ();
|
WAIT_SEM ();
|
||||||
INCR_COUNT(bound_calloc_count);
|
INCR_COUNT(bound_calloc_count);
|
||||||
tree = splay_insert ((size_t) ptr, size, tree);
|
tree = splay_insert ((size_t) ptr, size ? size : size + 1, tree);
|
||||||
if (tree && tree->start == (size_t) ptr)
|
if (tree && tree->start == (size_t) ptr)
|
||||||
tree->type = TCC_TYPE_CALLOC;
|
tree->type = TCC_TYPE_CALLOC;
|
||||||
POST_SEM ();
|
POST_SEM ();
|
||||||
@ -1387,6 +1409,59 @@ void *__bound_memset(void *s, int c, size_t n)
|
|||||||
return memset(s, c, n);
|
return memset(s, c, n);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if defined(__arm__)
|
||||||
|
void *__bound___aeabi_memcpy(void *dest, const void *src, size_t n)
|
||||||
|
{
|
||||||
|
dprintf(stderr, "%s, %s(): %p, %p, 0x%lx\n",
|
||||||
|
__FILE__, __FUNCTION__, dest, src, (unsigned long)n);
|
||||||
|
INCR_COUNT(bound_mempcy_count);
|
||||||
|
__bound_check(dest, n, "memcpy dest");
|
||||||
|
__bound_check(src, n, "memcpy src");
|
||||||
|
if (check_overlap(dest, n, src, n, "memcpy"))
|
||||||
|
return dest;
|
||||||
|
return __aeabi_memcpy(dest, src, n);
|
||||||
|
}
|
||||||
|
|
||||||
|
void *__bound___aeabi_memmove(void *dest, const void *src, size_t n)
|
||||||
|
{
|
||||||
|
dprintf(stderr, "%s, %s(): %p, %p, 0x%lx\n",
|
||||||
|
__FILE__, __FUNCTION__, dest, src, (unsigned long)n);
|
||||||
|
INCR_COUNT(bound_memmove_count);
|
||||||
|
__bound_check(dest, n, "memmove dest");
|
||||||
|
__bound_check(src, n, "memmove src");
|
||||||
|
return __aeabi_memmove(dest, src, n);
|
||||||
|
}
|
||||||
|
|
||||||
|
void *__bound___aeabi_memmove4(void *dest, const void *src, size_t n)
|
||||||
|
{
|
||||||
|
dprintf(stderr, "%s, %s(): %p, %p, 0x%lx\n",
|
||||||
|
__FILE__, __FUNCTION__, dest, src, (unsigned long)n);
|
||||||
|
INCR_COUNT(bound_memmove_count);
|
||||||
|
__bound_check(dest, n, "memmove dest");
|
||||||
|
__bound_check(src, n, "memmove src");
|
||||||
|
return __aeabi_memmove4(dest, src, n);
|
||||||
|
}
|
||||||
|
|
||||||
|
void *__bound___aeabi_memmove8(void *dest, const void *src, size_t n)
|
||||||
|
{
|
||||||
|
dprintf(stderr, "%s, %s(): %p, %p, 0x%lx\n",
|
||||||
|
__FILE__, __FUNCTION__, dest, src, (unsigned long)n);
|
||||||
|
INCR_COUNT(bound_memmove_count);
|
||||||
|
__bound_check(dest, n, "memmove dest");
|
||||||
|
__bound_check(src, n, "memmove src");
|
||||||
|
return __aeabi_memmove8(dest, src, n);
|
||||||
|
}
|
||||||
|
|
||||||
|
void *__bound___aeabi_memset(void *s, int c, size_t n)
|
||||||
|
{
|
||||||
|
dprintf(stderr, "%s, %s(): %p, %d, 0x%lx\n",
|
||||||
|
__FILE__, __FUNCTION__, s, c, (unsigned long)n);
|
||||||
|
INCR_COUNT(bound_memset_count);
|
||||||
|
__bound_check(s, n, "memset");
|
||||||
|
return __aeabi_memset(s, c, n);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
int __bound_strlen(const char *s)
|
int __bound_strlen(const char *s)
|
||||||
{
|
{
|
||||||
const char *p = s;
|
const char *p = s;
|
||||||
|
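Background for the __bound___aeabi_* wrappers above: on ARM EABI the compiler lowers aggregate copies and clears to __aeabi_memcpy/__aeabi_memmove4/__aeabi_memset rather than plain memcpy/memset, so those entry points have to be intercepted as well (and the size ? size : size + 1 insertions keep zero-byte allocations representable as non-empty ranges in the splay tree). A small illustrative example of code that may take the __aeabi_ path:

/* On ARM EABI, an aligned struct copy like this one is typically lowered to
   __aeabi_memcpy4/__aeabi_memmove4 rather than to memcpy. Illustrative only. */
struct blob { int words[4]; };

void copy_blob(struct blob *dst, const struct blob *src)
{
    *dst = *src;   /* may become: __aeabi_memcpy4(dst, src, 16) */
}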
lib/fetch_and_add_arm.S | 16 (new file)
@@ -0,0 +1,16 @@
+.text
+.align 2
+.global fetch_and_add_arm
+.type fetch_and_add_arm, %function
+fetch_and_add_arm:
+.int 0xee070fba # mcr 15, 0, r0, cr7, cr10, {5}
+.int 0xe1903f9f # ldrex r3, [r0]
+.int 0xe2833001 # add r3, r3, #1
+.int 0xe1802f93 # strex r2, r3, [r0]
+.int 0xe3520000 # cmp r2, #0
+.int 0x1afffffa # bne 4 <fetch_and_add_arm+0x4>
+.int 0xee070fba # mcr 15, 0, r0, cr7, cr10, {5}
+.int 0xe1a00003 # mov r0, r3
+.int 0xe12fff1e # bx lr
+
+.size fetch_and_add_arm, .-fetch_and_add_arm
lib/fetch_and_add_arm64.S | 14 (new file)
@@ -0,0 +1,14 @@
+.text
+.align 2
+.global fetch_and_add_arm64
+.type fetch_and_add_arm64, %function
+fetch_and_add_arm64:
+.int 0x885f7c01 # ldxr w1, [x0]
+.int 0x11000421 # add w1, w1, #0x1
+.int 0x8802fc01 # stlxr w2, w1, [x0]
+.int 0x35ffffa2 # cbnz w2, 0 <fetch_and_add_arm64>
+.int 0xd5033bbf # dmb ish
+.int 0x2a0103e0 # mov w0, w1
+.int 0xd65f03c0 # ret
+
+.size fetch_and_add_arm64, .-fetch_and_add_arm64
lib/fetch_and_add_riscv64.S | 12 (new file)
@@ -0,0 +1,12 @@
+.text
+.align 2
+.global fetch_and_add_riscv64
+.type fetch_and_add_riscv64, %function
+fetch_and_add_riscv64:
+.short 0x4705 # li a4,1
+.int 0x0f50000f # fence iorw,ow
+.int 0x04e527af # amoadd.w.aq a5,a4,(a0)
+.int 0x0017851b # addiw a0,a5,1
+.short 0x8082 # ret
+
+.size fetch_and_add_riscv64, .-fetch_and_add_riscv64
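The three pre-encoded helpers above back fetch_and_add() in bcheck.c on targets where its inline-asm path is unavailable; each performs an atomic increment with barrier semantics, roughly equivalent to the C11 sketch below (semantics only, not the exact instruction sequence):

#include <stdatomic.h>

/* arm: ldrex/strex loop; arm64: ldxr/stlxr + dmb ish; riscv64: amoadd.w.aq */
int fetch_and_add_sketch(_Atomic int *counter)
{
    return atomic_fetch_add_explicit(counter, 1, memory_order_seq_cst) + 1;
}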
libtcc.c | 2
@@ -34,6 +34,7 @@
 #elif defined(TCC_TARGET_ARM64)
 #include "arm64-gen.c"
 #include "arm64-link.c"
+#include "arm-asm.c"
 #elif defined(TCC_TARGET_C67)
 #include "c67-gen.c"
 #include "c67-link.c"
@@ -45,6 +46,7 @@
 #elif defined(TCC_TARGET_RISCV64)
 #include "riscv64-gen.c"
 #include "riscv64-link.c"
+#include "riscv64-asm.c"
 #else
 #error unknown target
 #endif
riscv64-asm.c | 94 (new file)
@@ -0,0 +1,94 @@
+/*************************************************************/
+/*
+ *  RISCV64 dummy assembler for TCC
+ *
+ */
+
+#ifdef TARGET_DEFS_ONLY
+
+#define CONFIG_TCC_ASM
+#define NB_ASM_REGS 32
+
+ST_FUNC void g(int c);
+ST_FUNC void gen_le16(int c);
+ST_FUNC void gen_le32(int c);
+
+/*************************************************************/
+#else
+/*************************************************************/
+#define USING_GLOBALS
+#include "tcc.h"
+
+static void asm_error(void)
+{
+    tcc_error("RISCV64 asm not implemented.");
+}
+
+/* XXX: make it faster ? */
+ST_FUNC void g(int c)
+{
+    int ind1;
+    if (nocode_wanted)
+        return;
+    ind1 = ind + 1;
+    if (ind1 > cur_text_section->data_allocated)
+        section_realloc(cur_text_section, ind1);
+    cur_text_section->data[ind] = c;
+    ind = ind1;
+}
+
+ST_FUNC void gen_le16 (int i)
+{
+    g(i);
+    g(i>>8);
+}
+
+ST_FUNC void gen_le32 (int i)
+{
+    gen_le16(i);
+    gen_le16(i>>16);
+}
+
+ST_FUNC void gen_expr32(ExprValue *pe)
+{
+    gen_le32(pe->v);
+}
+
+ST_FUNC void asm_opcode(TCCState *s1, int opcode)
+{
+    asm_error();
+}
+
+ST_FUNC void subst_asm_operand(CString *add_str, SValue *sv, int modifier)
+{
+    asm_error();
+}
+
+/* generate prolog and epilog code for asm statement */
+ST_FUNC void asm_gen_code(ASMOperand *operands, int nb_operands,
+                          int nb_outputs, int is_output,
+                          uint8_t *clobber_regs,
+                          int out_reg)
+{
+}
+
+ST_FUNC void asm_compute_constraints(ASMOperand *operands,
+                                     int nb_operands, int nb_outputs,
+                                     const uint8_t *clobber_regs,
+                                     int *pout_reg)
+{
+}
+
+ST_FUNC void asm_clobber(uint8_t *clobber_regs, const char *str)
+{
+    asm_error();
+}
+
+ST_FUNC int asm_parse_regvar (int t)
+{
+    asm_error();
+    return -1;
+}
+
+/*************************************************************/
+#endif /* ndef TARGET_DEFS_ONLY */
riscv64-gen.c | 155
@ -2,6 +2,8 @@
|
|||||||
|
|
||||||
// Number of registers available to allocator:
|
// Number of registers available to allocator:
|
||||||
#define NB_REGS 19 // x10-x17 aka a0-a7, f10-f17 aka fa0-fa7, xxx, ra, sp
|
#define NB_REGS 19 // x10-x17 aka a0-a7, f10-f17 aka fa0-fa7, xxx, ra, sp
|
||||||
|
#define NB_ASM_REGS 32
|
||||||
|
#define CONFIG_TCC_ASM
|
||||||
|
|
||||||
#define TREG_R(x) (x) // x = 0..7
|
#define TREG_R(x) (x) // x = 0..7
|
||||||
#define TREG_F(x) (x + 8) // x = 0..7
|
#define TREG_F(x) (x + 8) // x = 0..7
|
||||||
@ -61,6 +63,12 @@ ST_DATA const int reg_classes[NB_REGS] = {
|
|||||||
1 << TREG_SP
|
1 << TREG_SP
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#if defined(CONFIG_TCC_BCHECK)
|
||||||
|
static addr_t func_bound_offset;
|
||||||
|
static unsigned long func_bound_ind;
|
||||||
|
static int func_bound_add_epilog;
|
||||||
|
#endif
|
||||||
|
|
||||||
static int ireg(int r)
|
static int ireg(int r)
|
||||||
{
|
{
|
||||||
if (r == TREG_RA)
|
if (r == TREG_RA)
|
||||||
@ -377,6 +385,14 @@ static void gcall_or_jmp(int docall)
|
|||||||
R_RISCV_CALL_PLT, (int)vtop->c.i);
|
R_RISCV_CALL_PLT, (int)vtop->c.i);
|
||||||
o(0x17 | (tr << 7)); // auipc TR, 0 %call(func)
|
o(0x17 | (tr << 7)); // auipc TR, 0 %call(func)
|
||||||
EI(0x67, 0, tr, tr, 0);// jalr TR, r(TR)
|
EI(0x67, 0, tr, tr, 0);// jalr TR, r(TR)
|
||||||
|
#ifdef CONFIG_TCC_BCHECK
|
||||||
|
if (tcc_state->do_bounds_check &&
|
||||||
|
(vtop->sym->v == TOK_setjmp ||
|
||||||
|
vtop->sym->v == TOK__setjmp ||
|
||||||
|
vtop->sym->v == TOK_sigsetjmp ||
|
||||||
|
vtop->sym->v == TOK___sigsetjmp))
|
||||||
|
func_bound_add_epilog = 1;
|
||||||
|
#endif
|
||||||
} else if (vtop->r < VT_CONST) {
|
} else if (vtop->r < VT_CONST) {
|
||||||
int r = ireg(vtop->r);
|
int r = ireg(vtop->r);
|
||||||
EI(0x67, 0, tr, r, 0); // jalr TR, 0(R)
|
EI(0x67, 0, tr, r, 0); // jalr TR, 0(R)
|
||||||
@ -388,6 +404,130 @@ static void gcall_or_jmp(int docall)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if defined(CONFIG_TCC_BCHECK)
|
||||||
|
|
||||||
|
static void gen_bounds_call(int v)
|
||||||
|
{
|
||||||
|
Sym *sym = external_global_sym(v, &func_old_type);
|
||||||
|
|
||||||
|
greloca(cur_text_section, sym, ind, R_RISCV_CALL_PLT, 0);
|
||||||
|
o(0x17 | (1 << 7)); // auipc TR, 0 %call(func)
|
||||||
|
EI(0x67, 0, 1, 1, 0); // jalr TR, r(TR)
|
||||||
|
}
|
||||||
|
|
||||||
|
/* generate a bounded pointer addition */
|
||||||
|
ST_FUNC void gen_bounded_ptr_add(void)
|
||||||
|
{
|
||||||
|
vpush_global_sym(&func_old_type, TOK___bound_ptr_add);
|
||||||
|
vrott(3);
|
+    gfunc_call(2);
+    vpushi(0);
+    /* returned pointer is in REG_IRET */
+    vtop->r = REG_IRET | VT_BOUNDED;
+    if (nocode_wanted)
+        return;
+    /* relocation offset of the bounding function call point */
+    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
+}
+
+/* patch pointer addition in vtop so that pointer dereferencing is
+   also tested */
+ST_FUNC void gen_bounded_ptr_deref(void)
+{
+    addr_t func;
+    int size, align;
+    ElfW(Rela) *rel;
+    Sym *sym;
+
+    if (nocode_wanted)
+        return;
+
+    size = type_size(&vtop->type, &align);
+    switch(size) {
+    case  1: func = TOK___bound_ptr_indir1; break;
+    case  2: func = TOK___bound_ptr_indir2; break;
+    case  4: func = TOK___bound_ptr_indir4; break;
+    case  8: func = TOK___bound_ptr_indir8; break;
+    case 12: func = TOK___bound_ptr_indir12; break;
+    case 16: func = TOK___bound_ptr_indir16; break;
+    default:
+        /* may happen with struct member access */
+        return;
+        //tcc_error("unhandled size when dereferencing bounded pointer");
+        //func = 0;
+        //break;
+    }
+    sym = external_global_sym(func, &func_old_type);
+    if (!sym->c)
+        put_extern_sym(sym, NULL, 0, 0);
+    /* patch relocation */
+    /* XXX: find a better solution ? */
+    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
+    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
+}
+
+static void gen_bounds_prolog(void)
+{
+    /* leave some room for bound checking code */
+    func_bound_offset = lbounds_section->data_offset;
+    func_bound_ind = ind;
+    func_bound_add_epilog = 0;
+    o(0x00000013);  /* ld a0,#lbound section pointer */
+    o(0x00000013);
+    o(0x00000013);  /* nop -> call __bound_local_new */
+    o(0x00000013);
+}
+
+static void gen_bounds_epilog(void)
+{
+    static Sym label;
+    addr_t saved_ind;
+    addr_t *bounds_ptr;
+    Sym *sym_data;
+    int offset_modified = func_bound_offset != lbounds_section->data_offset;
+
+    if (!offset_modified && !func_bound_add_epilog)
+        return;
+
+    /* add end of table info */
+    bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
+    *bounds_ptr = 0;
+
+    sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
+                           func_bound_offset, lbounds_section->data_offset);
+
+    if (!label.v) {
+        label.v = tok_alloc(".LB0 ", 4)->tok;
+        label.type.t = VT_VOID | VT_STATIC;
+    }
+    /* generate bound local allocation */
+    if (offset_modified) {
+        saved_ind = ind;
+        ind = func_bound_ind;
+        label.c = 0; /* force new local ELF symbol */
+        put_extern_sym(&label, cur_text_section, ind, 0);
+        greloca(cur_text_section, sym_data, ind, R_RISCV_GOT_HI20, 0);
+        o(0x17 | (10 << 7));    // auipc a0, 0 %pcrel_hi(sym)+addend
+        greloca(cur_text_section, &label, ind, R_RISCV_PCREL_LO12_I, 0);
+        EI(0x03, 3, 10, 10, 0); // ld a0, 0(a0)
+        gen_bounds_call(TOK___bound_local_new);
+        ind = saved_ind;
+    }
+
+    /* generate bound check local freeing */
+    o(0xe02a1101); /* addi sp,sp,-32  sd   a0,0(sp)   */
+    o(0xa82ae42e); /* sd   a1,8(sp)   fsd  fa0,16(sp) */
+    label.c = 0; /* force new local ELF symbol */
+    put_extern_sym(&label, cur_text_section, ind, 0);
+    greloca(cur_text_section, sym_data, ind, R_RISCV_GOT_HI20, 0);
+    o(0x17 | (10 << 7));    // auipc a0, 0 %pcrel_hi(sym)+addend
+    greloca(cur_text_section, &label, ind, R_RISCV_PCREL_LO12_I, 0);
+    EI(0x03, 3, 10, 10, 0); // ld a0, 0(a0)
+    gen_bounds_call(TOK___bound_local_delete);
+    o(0x65a26502); /* ld   a0,0(sp)   ld   a1,8(sp)   */
+    o(0x61052542); /* fld  fa0,16(sp) addi sp,sp,32   */
+}
+#endif
 static void reg_pass_rec(CType *type, int *rc, int *fieldofs, int ofs)
 {
     if ((type->t & VT_BTYPE) == VT_STRUCT) {
@@ -440,6 +580,12 @@ ST_FUNC void gfunc_call(int nb_args)
     int stack_adj = 0, tempspace = 0, ofs, splitofs = 0;
     SValue *sv;
     Sym *sa;
+
+#ifdef CONFIG_TCC_BCHECK
+    if (tcc_state->do_bounds_check)
+        gbound_args(nb_args);
+#endif
+
     areg[0] = 0; /* int arg regs */
     areg[1] = 8; /* float arg regs */
     sa = vtop[-nb_args].type.ref->next;
@@ -691,6 +837,10 @@ ST_FUNC void gfunc_prolog(Sym *func_sym)
             ES(0x23, 3, 8, 10 + areg[0], -8 + num_va_regs * 8); // sd aX, loc(s0)
         }
     }
+#ifdef CONFIG_TCC_BCHECK
+    if (tcc_state->do_bounds_check)
+        gen_bounds_prolog();
+#endif
 }
 
 ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret,
@@ -732,6 +882,11 @@ ST_FUNC void gfunc_epilog(void)
 {
     int v, saved_ind, d, large_ofs_ind;
 
+#ifdef CONFIG_TCC_BCHECK
+    if (tcc_state->do_bounds_check)
+        gen_bounds_epilog();
+#endif
+
     loc = (loc - num_va_regs * 8);
     d = v = (-loc + 15) & -16;
 
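
Side note (not part of the diff): gen_bounds_prolog() reserves four instruction words with o(0x00000013) and gen_bounds_epilog() later overwrites them with the auipc/ld/call sequence once the function's bound entries are known. 0x00000013 is the canonical RISC-V NOP (addi x0,x0,0), and 0x17 | (10 << 7) encodes auipc a0,0. A minimal standalone sketch of those two encodings; the helper names rv_addi and rv_auipc are made up here:

#include <stdint.h>
#include <stdio.h>

/* I-type ADDI: imm[11:0] | rs1 | funct3=000 | rd | opcode 0x13 */
static uint32_t rv_addi(unsigned rd, unsigned rs1, int imm)
{
    return ((uint32_t)(imm & 0xfff) << 20) | (rs1 << 15) | (rd << 7) | 0x13u;
}

/* U-type AUIPC: imm[31:12] | rd | opcode 0x17 */
static uint32_t rv_auipc(unsigned rd, uint32_t imm20)
{
    return (imm20 << 12) | (rd << 7) | 0x17u;
}

int main(void)
{
    printf("nop        = 0x%08x\n", rv_addi(0, 0, 0)); /* 0x00000013, the filler word above */
    printf("auipc a0,0 = 0x%08x\n", rv_auipc(10, 0));  /* 0x00000517 == 0x17 | (10 << 7) */
    return 0;
}

Running it should print 0x00000013 and 0x00000517, matching the constants emitted by the prolog and epilog.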

riscv64-link.c
@@ -284,11 +284,11 @@ void relocate(TCCState *s1, ElfW_Rel *rel, int type, unsigned char *ptr,
         return;
 
     case R_RISCV_32:
-        write32le(ptr, val);
+        add32le(ptr, val);
         return;
     case R_RISCV_JUMP_SLOT:
     case R_RISCV_64:
-        write64le(ptr, val);
+        add64le(ptr, val);
         return;
     case R_RISCV_ADD64:
         write64le(ptr, read64le(ptr) + val);
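
Side note (not part of the diff): switching from write32le/write64le to add32le/add64le makes the relocation accumulate into whatever value is already stored at the target instead of overwriting it. The real helpers live in tccelf.c, which this commit does not touch; a sketch assuming the usual read-modify-write form:

#include <stdint.h>

/* Sketch only: little-endian 32-bit read/write plus the accumulating variant. */
static uint32_t read32le(const unsigned char *p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8)
         | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void write32le(unsigned char *p, uint32_t x)
{
    p[0] = x & 0xff; p[1] = (x >> 8) & 0xff;
    p[2] = (x >> 16) & 0xff; p[3] = (x >> 24) & 0xff;
}

/* add32le(): keep what is stored at p and add the relocation value to it */
static void add32le(unsigned char *p, uint32_t x)
{
    write32le(p, read32le(p) + x);
}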

tcc-doc.texi
@@ -370,7 +370,7 @@ Print statistic information at exit of program.
 Try to continue in case of a bound checking error.
 @end table
 
-Note: @option{-b} is only available on i386 (linux and windows) and x86_64 (linux and windows) for the moment.
+Note: @option{-b} is only available on i386 (linux and windows), x86_64 (linux and windows), arm, arm64 and riscv64 for the moment.
 
 @item -bt[N]
 Display N callers in stack traces. This is useful with @option{-g} or @option{-b}.
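
Side note (not part of the diff): with this change the documented usage applies to the new targets as well. A tiny hypothetical example of the kind of bug @option{-b} is meant to trap, run as tcc -b -run buggy.c (assuming a tcc built with CONFIG_TCC_BCHECK for the host):

/* buggy.c: a deliberate off-by-one */
#include <string.h>

int main(void)
{
    char buf[8];
    strcpy(buf, "12345678");   /* 8 characters + NUL = 9 bytes into an 8-byte array */
    return buf[0];
}

The copy writes one byte past the end of buf, which the bound-checking wrappers report at run time instead of letting it silently corrupt the stack.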

tcc.h (6 changed lines)
@@ -184,7 +184,9 @@ extern long double strtold (const char *__nptr, char **__endptr);
 
 #if defined TCC_IS_NATIVE && !defined CONFIG_TCCBOOT
 # define CONFIG_TCC_BACKTRACE
-# if (defined TCC_TARGET_I386 || defined TCC_TARGET_X86_64) \
+# if (defined TCC_TARGET_I386 || defined TCC_TARGET_X86_64 || \
+      defined TCC_TARGET_ARM || defined TCC_TARGET_ARM64) || \
+      defined TCC_TARGET_RISCV64 \
     && !defined TCC_UCLIBC && !defined TCC_MUSL
 # define CONFIG_TCC_BCHECK /* enable bound checking code */
 # endif
@@ -354,6 +356,7 @@ extern long double strtold (const char *__nptr, char **__endptr);
 #elif defined TCC_TARGET_ARM64
 # include "arm64-gen.c"
 # include "arm64-link.c"
+# include "arm-asm.c"
 #elif defined TCC_TARGET_C67
 # define TCC_TARGET_COFF
 # include "coff.h"
@@ -362,6 +365,7 @@ extern long double strtold (const char *__nptr, char **__endptr);
 #elif defined(TCC_TARGET_RISCV64)
 # include "riscv64-gen.c"
 # include "riscv64-link.c"
+# include "riscv64-asm.c"
 #else
 #error unknown target
 #endif

tccelf.c (2 changed lines)
@@ -3123,7 +3123,7 @@ ST_FUNC int tcc_load_dll(TCCState *s1, int fd, const char *filename, int level)
     /* test CPU specific stuff */
     if (ehdr.e_ident[5] != ELFDATA2LSB ||
         ehdr.e_machine != EM_TCC_TARGET) {
-        tcc_error_noabort("bad architecture");
+        tcc_error_noabort("bad architecture: %s", filename);
         return -1;
     }
 

tccgen.c (31 changed lines)
@@ -886,6 +886,12 @@ ST_FUNC void put_extern_sym2(Sym *sym, int sh_num,
         /* XXX: avoid doing that for statics ? */
         /* if bound checking is activated, we change some function
            names by adding the "__bound" prefix */
+#if defined(TCC_TARGET_ARM) && defined(TCC_ARM_EABI)
+        if (strcmp (name, "memcpy") == 0 ||
+            strcmp (name, "memmove") == 0 ||
+            strcmp (name, "memset") == 0)
+            goto add_bound;
+#endif
         switch(sym->v) {
 #ifdef TCC_TARGET_PE
         /* XXX: we rely only on malloc hooks */
@@ -897,6 +903,10 @@ ST_FUNC void put_extern_sym2(Sym *sym, int sh_num,
 #endif
         case TOK_memcpy:
         case TOK_memmove:
+#if defined(TCC_TARGET_ARM) && defined(TCC_ARM_EABI)
+        case TOK_memmove4:
+        case TOK_memmove8:
+#endif
         case TOK_memset:
         case TOK_memcmp:
         case TOK_strlen:
@@ -907,12 +917,17 @@ ST_FUNC void put_extern_sym2(Sym *sym, int sh_num,
         case TOK_strcat:
        case TOK_strchr:
         case TOK_strdup:
+#if defined TCC_TARGET_I386 || defined TCC_TARGET_X86_64
         case TOK_alloca:
+#endif
         case TOK_mmap:
         case TOK_munmap:
         case TOK_longjmp:
 #ifndef TCC_TARGET_PE
         case TOK_siglongjmp:
+#endif
+#if defined(TCC_TARGET_ARM) && defined(TCC_ARM_EABI)
+        add_bound:
 #endif
             strcpy(buf, "__bound_");
             strcat(buf, name);
@@ -3820,9 +3835,9 @@ ST_FUNC void vstore(void)
             /* address of memcpy() */
 #ifdef TCC_ARM_EABI
             if(!(align & 7))
-                vpush_global_sym(&func_old_type, TOK_memcpy8);
+                vpush_global_sym(&func_old_type, TOK_memmove8);
             else if(!(align & 3))
-                vpush_global_sym(&func_old_type, TOK_memcpy4);
+                vpush_global_sym(&func_old_type, TOK_memmove4);
             else
 #endif
             /* Use memmove, rather than memcpy, as dest and src may be same: */
@@ -5636,13 +5651,25 @@ ST_FUNC void unary(void)
             mk_pointer(&type);
             vset(&type, VT_LOCAL, 0);       /* local frame */
             while (level--) {
+#ifdef TCC_TARGET_RISCV64
+                vpushi(2*PTR_SIZE);
+                gen_op('-');
+#endif
                 mk_pointer(&vtop->type);
                 indir();                    /* -> parent frame */
             }
             if (tok1 == TOK_builtin_return_address) {
                 // assume return address is just above frame pointer on stack
+#ifdef TCC_TARGET_ARM
+                vpushi(2*PTR_SIZE);
+                gen_op('+');
+#elif defined TCC_TARGET_RISCV64
+                vpushi(PTR_SIZE);
+                gen_op('-');
+#else
                 vpushi(PTR_SIZE);
                 gen_op('+');
+#endif
                 mk_pointer(&vtop->type);
                 indir();
             }
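
Side note (not part of the diff): the last tccgen.c hunk only changes the offsets used when walking the frame chain for the builtins on arm and riscv64; their user-visible behaviour is unchanged. For reference, a small self-contained example of those builtins (generic C, not specific to any target):

#include <stdio.h>

static void callee(void)
{
    /* level 0: this function's own frame and the address it will return to */
    printf("frame address  : %p\n", __builtin_frame_address(0));
    printf("return address : %p\n", __builtin_return_address(0));
}

int main(void)
{
    callee();
    return 0;
}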

tccrun.c (39 changed lines)
@@ -648,10 +648,12 @@ static void rt_getcontext(ucontext_t *uc, rt_context *rc)
 #elif defined(__arm__)
     rc->ip = uc->uc_mcontext.arm_pc;
     rc->fp = uc->uc_mcontext.arm_fp;
-    rc->sp = uc->uc_mcontext.arm_sp;
 #elif defined(__aarch64__)
     rc->ip = uc->uc_mcontext.pc;
     rc->fp = uc->uc_mcontext.regs[29];
+#elif defined(__riscv)
+    rc->ip = uc->uc_mcontext.__gregs[REG_PC];
+    rc->fp = uc->uc_mcontext.__gregs[REG_S0];
 #endif
 }
 
@@ -805,22 +807,9 @@ static int rt_get_caller_pc(addr_t *paddr, rt_context *rc, int level)
         *paddr = rc->ip;
     } else {
         addr_t fp = rc->fp;
-        addr_t sp = rc->sp;
-        if (sp < 0x1000)
-            sp = 0x1000;
-        /* XXX: specific to tinycc stack frames */
-        if (fp < sp + 12 || fp & 3)
-            return -1;
-        while (--level) {
-            sp = ((addr_t *)fp)[-2];
-            if (sp < fp || sp - fp > 16 || sp & 3)
-                return -1;
-            fp = ((addr_t *)fp)[-3];
-            if (fp <= sp || fp - sp < 12 || fp & 3)
-                return -1;
-        }
-        /* XXX: check address validity with program info */
-        *paddr = ((addr_t *)fp)[-1];
+        while (--level)
+            fp = ((addr_t *)fp)[0];
+        *paddr = ((addr_t *)fp)[2];
     }
     return 0;
 #endif
@@ -829,7 +818,7 @@ static int rt_get_caller_pc(addr_t *paddr, rt_context *rc, int level)
 #elif defined(__aarch64__)
 static int rt_get_caller_pc(addr_t *paddr, rt_context *rc, int level)
 {
     if (level == 0) {
         *paddr = rc->ip;
     } else {
         addr_t *fp = (addr_t*)rc->fp;
@@ -840,6 +829,20 @@ static int rt_get_caller_pc(addr_t *paddr, rt_context *rc, int level)
         return 0;
     }
 }
 
+#elif defined(__riscv)
+static int rt_get_caller_pc(addr_t *paddr, rt_context *rc, int level)
+{
+    if (level == 0) {
+        *paddr = rc->ip;
+    } else {
+        addr_t *fp = (addr_t*)rc->fp;
+        while (--level)
+            fp = (addr_t *)fp[-2];
+        *paddr = fp[-1];
+    }
+    return 0;
+}
+
 #else
 #warning add arch specific rt_get_caller_pc()
 static int rt_get_caller_pc(addr_t *paddr, rt_context *rc, int level)
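
Side note (not part of the diff): the new riscv64 walker relies on tinycc's frame layout, where fp[-1] holds the saved return address and fp[-2] the caller's saved frame pointer. The same walk written as a standalone sketch (hypothetical helper, only valid for frames laid out that way):

#include <stdint.h>

typedef uintptr_t addr_t;

/* Hypothetical helper: walk `hops` frames up from `fp` and return the saved
 * return address, assuming fp[-1] = return address and fp[-2] = caller fp. */
static addr_t caller_pc(addr_t *fp, int hops)
{
    while (hops-- > 0)
        fp = (addr_t *)fp[-2];   /* follow the caller's saved frame pointer */
    return fp[-1];               /* return address stored just below fp */
}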

tcctok.h (4 changed lines)
@@ -205,9 +205,9 @@
 #if defined TCC_TARGET_ARM
 # ifdef TCC_ARM_EABI
      DEF(TOK_memcpy, "__aeabi_memcpy")
-     DEF(TOK_memcpy4, "__aeabi_memcpy4")
-     DEF(TOK_memcpy8, "__aeabi_memcpy8")
      DEF(TOK_memmove, "__aeabi_memmove")
+     DEF(TOK_memmove4, "__aeabi_memmove4")
+     DEF(TOK_memmove8, "__aeabi_memmove8")
      DEF(TOK_memset, "__aeabi_memset")
      DEF(TOK___aeabi_ldivmod, "__aeabi_ldivmod")
      DEF(TOK___aeabi_uldivmod, "__aeabi_uldivmod")

tests/Makefile
@@ -35,6 +35,15 @@ endif
 ifeq ($(ARCH),x86_64)
  TESTS += $(BTESTS)
 endif
+ifeq ($(ARCH),arm)
+ TESTS += $(BTESTS)
+endif
+ifeq ($(ARCH),arm64)
+ TESTS += $(BTESTS)
+endif
+ifeq ($(ARCH),riscv64)
+ TESTS += $(BTESTS)
+endif
 endif
 ifdef CONFIG_OSX # -run only
  TESTS := hello-run libtest tests2-dir pp-dir

tests/boundtest.c
@@ -169,21 +169,34 @@ int test13(void)
     return strlen(tab);
 }
 
+#if defined __i386__ || defined __x86_64__
+#define allocf(x)
+#else
+#define alloca(x) malloc(x)
+#define allocf(x) free(x)
+#endif
+
 int test14(void)
 {
     char *p = alloca(TAB_SIZE);
+    size_t ret;
     memset(p, 'a', TAB_SIZE);
     p[TAB_SIZE-1] = 0;
-    return strlen(p);
+    ret = strlen(p);
+    allocf(p);
+    return ret;
 }
 
 /* error */
 int test15(void)
 {
     char *p = alloca(TAB_SIZE-1);
+    size_t ret;
     memset(p, 'a', TAB_SIZE);
     p[TAB_SIZE-1] = 0;
-    return strlen(p);
+    ret = strlen(p);
+    allocf(p);
+    return ret;
 }
 
 /* ok */
@@ -194,6 +207,7 @@ int test16()
 
     p = alloca(16);
     strcpy(p,"12345678901234");
+    allocf(p);
 
     /* Test alloca embedded in a larger expression */
     printf("alloca : %s : %s\n", p, strcpy(alloca(strlen(demo)+1),demo) );
@@ -209,6 +223,7 @@ int test17()
 
     p = alloca(16);
     strcpy(p,"12345678901234");
+    allocf(p);
 
     /* Test alloca embedded in a larger expression */
     printf("alloca : %s : %s\n", p, strcpy(alloca(strlen(demo)),demo) );

tests/tests2/Makefile
@@ -23,8 +23,11 @@ ifeq (,$(filter i386,$(ARCH)))
 SKIP += 98_al_ax_extend.test 99_fastcall.test
 endif
 ifeq (,$(filter i386 x86_64,$(ARCH)))
-SKIP += 85_asm-outside-function.test
-SKIP += 112_backtrace.test 113_btdll.test
+SKIP += 85_asm-outside-function.test # x86 asm
+SKIP += 113_btdll.test # dll support needed
+endif
+ifeq (,$(filter i386 x86_64 arm arm64 riscv64,$(ARCH)))
+SKIP += 112_backtrace.test
 SKIP += 114_bound_signal.test
 SKIP += 115_bound_setjmp.test
 SKIP += 116_bound_setjmp2.test
@@ -82,7 +85,7 @@ GEN-ALWAYS =
 
 112_backtrace.test: FLAGS += -dt -b
 112_backtrace.test 113_btdll.test: FILTER += \
-    -e 's;[0-9A-Fa-fx]\{8,\};........;g' \
+    -e 's;[0-9A-Fa-fx]\{5,\};........;g' \
     -e 's;0x[0-9A-Fa-f]\+;0x?;g'
 
 # this test creates two DLLs and an EXE

x86_64-gen.c
@@ -744,8 +744,9 @@ static void gen_bounds_epilog(void)
     addr_t saved_ind;
     addr_t *bounds_ptr;
     Sym *sym_data;
+    int offset_modified = func_bound_offset != lbounds_section->data_offset;
 
-    if (func_bound_offset == lbounds_section->data_offset && !func_bound_add_epilog)
+    if (!offset_modified && !func_bound_add_epilog)
         return;
 
     /* add end of table info */
@@ -756,7 +757,7 @@ static void gen_bounds_epilog(void)
                            func_bound_offset, lbounds_section->data_offset);
 
     /* generate bound local allocation */
-    if (func_bound_offset != lbounds_section->data_offset) {
+    if (offset_modified) {
         saved_ind = ind;
         ind = func_bound_ind;
         greloca(cur_text_section, sym_data, ind + 2, R_X86_64_64, 0);
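
Side note (not part of the diff): as far as one can tell from the hunks above, the point of capturing offset_modified first is that the "add end of table info" step itself moves lbounds_section->data_offset, so the later comparison in the old code could no longer tell whether the function had actually registered any local bounds. A minimal illustration of that ordering pitfall, in plain C with no tcc internals:

#include <stdio.h>

int main(void)
{
    int table_start = 0, table_offset = 0;                 /* nothing was added here */

    int modified_before = (table_start != table_offset);   /* what the fixed code records */
    table_offset += 8;                                      /* append the end-of-table marker */
    int modified_after = (table_start != table_offset);     /* what the old second test saw */

    printf("before marker: %d, after marker: %d\n", modified_before, modified_after);
    return 0;
}

The first flag stays 0 when nothing was added, while the second is always 1 once the marker is in place, which is why the old second test could never be false.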