Add support for Apple M1

The Apple M1 uses position-independent executables (PIE).
I have implemented this in tccmacho.c.
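
For context, a minimal sketch of the header values a PIE Mach-O executable carries, using the MH_* and CPU_* constants the tccmacho.c hunks below introduce (MH_MAGIC_64 is the standard 64-bit magic; the struct is a cut-down stand-in, not TCC's internal layout):

    /* Sketch only: arm64 PIE executable header values. */
    #include <stdint.h>
    #include <stdio.h>

    #define MH_MAGIC_64            0xfeedfacf
    #define MH_EXECUTE             (0x2)
    #define MH_DYLDLINK            (0x4)
    #define MH_PIE                 (0x200000)
    #define CPU_ARCH_ABI64         (0x01000000)
    #define CPU_TYPE_ARM           (12)
    #define CPU_TYPE_ARM64         (CPU_TYPE_ARM | CPU_ARCH_ABI64)
    #define CPU_SUBTYPE_ARM64_ALL  (0)

    struct mh64 { uint32_t magic, cputype, cpusubtype, filetype, flags; };

    int main(void)
    {
        struct mh64 mh = { MH_MAGIC_64, CPU_TYPE_ARM64, CPU_SUBTYPE_ARM64_ALL,
                           MH_EXECUTE, MH_DYLDLINK | MH_PIE };
        /* MH_PIE is what lets dyld slide the image to a random base address */
        printf("filetype=0x%x flags=0x%x\n", mh.filetype, mh.flags);
        return 0;
    }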

Apple also passes variadic arguments (va_args) on the stack instead of in registers.
Characters are also signed instead of unsigned.
This is implemented in arm64-gen.c and tccdefs.h.
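
A small check program (a sketch for illustration, not TCC code; assumes a native compiler on macOS/arm64): plain char is signed there, and variadic arguments are fetched from the stack, which is why the Apple __builtin_va_list in the tccdefs.h hunk below is reduced to a single __stack pointer.

    #include <limits.h>
    #include <stdarg.h>
    #include <stdio.h>

    static int sum(int n, ...)   /* on Apple arm64 the variadic ints live on the stack */
    {
        va_list ap;
        int s = 0;
        va_start(ap, n);
        while (n--)
            s += va_arg(ap, int);
        va_end(ap);
        return s;
    }

    int main(void)
    {
        /* CHAR_MIN is negative when plain char is signed (the Apple default) */
        printf("CHAR_MIN = %d\n", CHAR_MIN);
        printf("sum      = %d\n", sum(3, 1, 2, 3));
        return 0;
    }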

Add the bounds-checking lib to lib/Makefile.

Add leading-underscore support in lib/atomic.S and lib/fetch_and_add.S.
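
The mechanism is a small token-pasting wrapper; the stand-alone demo below shows the same _() macro the diff adds, with __leading_underscore defined by hand here since the real build provides it for Mach-O targets (where C symbols carry a leading underscore).

    #include <stdio.h>

    #define __leading_underscore           /* set by hand for this demo */
    #ifdef __leading_underscore
    # define _(s) _##s                     /* fetch_and_add_arm64 -> _fetch_and_add_arm64 */
    #else
    # define _(s) s                        /* ELF targets keep the plain name */
    #endif

    #define STR_(x) #x
    #define STR(x)  STR_(x)

    int main(void)
    {
        /* the .S sources wrap every global label in _() */
        puts(STR(_(fetch_and_add_arm64))); /* prints "_fetch_and_add_arm64" */
        return 0;
    }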

Disable __clear_cache in lib/lib-arm64.c (use the system version).
I will try to fix this in a future push.

Disable test_asm_call in tests/tcctest.c. Clang does not support @plt.
Also disable the weak symbols test.
I will try to fix weak symbol support in a future push.

Disable the 64-bit atomics (HAS_64BITS) in tests/tests2/124_atomic_counter.c on Apple.
This is a bug in the atomic code and will be fixed in a future push.

You have to use the --dwarf configure option; stabs only works with -run.

Tested on Apple x86_64 (10.5) and arm64 (12.3).
herman ten brugge 2022-11-16 12:52:51 -06:00
parent c6b722f3f8
commit c8ef84c854
10 changed files with 736 additions and 129 deletions

View File

@ -38,7 +38,9 @@
#define MAX_ALIGN 16 #define MAX_ALIGN 16
#ifndef TCC_TARGET_MACHO
#define CHAR_IS_UNSIGNED #define CHAR_IS_UNSIGNED
#endif
/* define if return values need to be extended explicitely /* define if return values need to be extended explicitely
at caller side (for interfacing with non-TCC compilers) */ at caller side (for interfacing with non-TCC compilers) */
@ -810,7 +812,7 @@ static int arm64_hfa(CType *type, unsigned *fsize)
return 0; return 0;
} }
static unsigned long arm64_pcs_aux(int n, CType **type, unsigned long *a) static unsigned long arm64_pcs_aux(int variadic, int n, CType **type, unsigned long *a)
{ {
int nx = 0; // next integer register int nx = 0; // next integer register
int nv = 0; // next vector register int nv = 0; // next vector register
@ -827,6 +829,12 @@ static unsigned long arm64_pcs_aux(int n, CType **type, unsigned long *a)
else else
size = type_size(type[i], &align); size = type_size(type[i], &align);
#if defined(__APPLE__)
if (variadic && i == variadic) {
nx = 8;
nv = 8;
}
#endif
if (hfa) if (hfa)
// B.2 // B.2
; ;
@ -931,7 +939,7 @@ static unsigned long arm64_pcs_aux(int n, CType **type, unsigned long *a)
return ns - 32; return ns - 32;
} }
static unsigned long arm64_pcs(int n, CType **type, unsigned long *a) static unsigned long arm64_pcs(int variadic, int n, CType **type, unsigned long *a)
{ {
unsigned long stack; unsigned long stack;
@ -939,12 +947,12 @@ static unsigned long arm64_pcs(int n, CType **type, unsigned long *a)
if ((type[0]->t & VT_BTYPE) == VT_VOID) if ((type[0]->t & VT_BTYPE) == VT_VOID)
a[0] = -1; a[0] = -1;
else { else {
arm64_pcs_aux(1, type, a); arm64_pcs_aux(0, 1, type, a);
assert(a[0] == 0 || a[0] == 1 || a[0] == 16); assert(a[0] == 0 || a[0] == 1 || a[0] == 16);
} }
// Argument types: // Argument types:
stack = arm64_pcs_aux(n, type + 1, a + 1); stack = arm64_pcs_aux(variadic, n, type + 1, a + 1);
if (0) { if (0) {
int i; int i;
@ -970,6 +978,16 @@ static unsigned long arm64_pcs(int n, CType **type, unsigned long *a)
return stack; return stack;
} }
static int n_func_args(CType *type)
{
int n_args = 0;
Sym *arg;
for (arg = type->ref->next; arg; arg = arg->next)
n_args++;
return n_args;
}
ST_FUNC void gfunc_call(int nb_args) ST_FUNC void gfunc_call(int nb_args)
{ {
CType *return_type; CType *return_type;
@ -977,6 +995,8 @@ ST_FUNC void gfunc_call(int nb_args)
unsigned long *a, *a1; unsigned long *a, *a1;
unsigned long stack; unsigned long stack;
int i; int i;
int variadic = (vtop[-nb_args].type.ref->f.func_type == FUNC_ELLIPSIS);
int var_nb_arg = n_func_args(&vtop[-nb_args].type);
#ifdef CONFIG_TCC_BCHECK #ifdef CONFIG_TCC_BCHECK
if (tcc_state->do_bounds_check) if (tcc_state->do_bounds_check)
@ -995,7 +1015,7 @@ ST_FUNC void gfunc_call(int nb_args)
for (i = 0; i < nb_args; i++) for (i = 0; i < nb_args; i++)
t[nb_args - i] = &vtop[-i].type; t[nb_args - i] = &vtop[-i].type;
stack = arm64_pcs(nb_args, t, a); stack = arm64_pcs(variadic ? var_nb_arg : 0, nb_args, t, a);
// Allocate space for structs replaced by pointer: // Allocate space for structs replaced by pointer:
for (i = nb_args; i; i--) for (i = nb_args; i; i--)
@ -1055,7 +1075,7 @@ ST_FUNC void gfunc_call(int nb_args)
} }
else { else {
gv(RC_INT); gv(RC_INT);
arm64_strx(arm64_type_size(vtop[0].type.t), arm64_strx(3, // arm64_type_size(vtop[0].type.t),
intr(vtop[0].r), 31, a[i] - 32); intr(vtop[0].r), 31, a[i] - 32);
} }
} }
@ -1167,6 +1187,8 @@ ST_FUNC void gfunc_prolog(Sym *func_sym)
int use_x8 = 0; int use_x8 = 0;
int last_int = 0; int last_int = 0;
int last_float = 0; int last_float = 0;
int variadic = func_sym->type.ref->f.func_type == FUNC_ELLIPSIS;
int var_nb_arg = n_func_args(&func_sym->type);
func_vc = 144; // offset of where x8 is stored func_vc = 144; // offset of where x8 is stored
@ -1178,9 +1200,9 @@ ST_FUNC void gfunc_prolog(Sym *func_sym)
for (sym = func_type->ref; sym; sym = sym->next) for (sym = func_type->ref; sym; sym = sym->next)
t[i++] = &sym->type; t[i++] = &sym->type;
arm64_func_va_list_stack = arm64_pcs(n - 1, t, a); arm64_func_va_list_stack = arm64_pcs(variadic ? var_nb_arg : 0, n - 1, t, a);
if (func_sym->type.ref->f.func_type == FUNC_ELLIPSIS) { if (variadic) {
use_x8 = 1; use_x8 = 1;
last_int = 4; last_int = 4;
last_float = 4; last_float = 4;
@ -1282,6 +1304,7 @@ ST_FUNC void gen_va_start(void)
o(0x910383be); // add x30,x29,#224 o(0x910383be); // add x30,x29,#224
o(0xf900001e | r << 5); // str x30,[x(r)] o(0xf900001e | r << 5); // str x30,[x(r)]
#if !defined(__APPLE__)
if (arm64_func_va_list_gr_offs) { if (arm64_func_va_list_gr_offs) {
if (arm64_func_va_list_stack) if (arm64_func_va_list_stack)
o(0x910383be); // add x30,x29,#224 o(0x910383be); // add x30,x29,#224
@ -1298,6 +1321,7 @@ ST_FUNC void gen_va_start(void)
arm64_movimm(30, arm64_func_va_list_vr_offs); arm64_movimm(30, arm64_func_va_list_vr_offs);
o(0xb9001c1e | r << 5); // str w30,[x(r),#28] o(0xb9001c1e | r << 5); // str w30,[x(r),#28]
#endif
--vtop; --vtop;
} }
@ -1321,6 +1345,7 @@ ST_FUNC void gen_va_arg(CType *t)
if (!hfa) { if (!hfa) {
uint32_t n = size > 16 ? 8 : (size + 7) & -8; uint32_t n = size > 16 ? 8 : (size + 7) & -8;
#if !defined(__APPLE__)
o(0xb940181e | r0 << 5); // ldr w30,[x(r0),#24] // __gr_offs o(0xb940181e | r0 << 5); // ldr w30,[x(r0),#24] // __gr_offs
if (align == 16) { if (align == 16) {
assert(0); // this path untested but needed for __uint128_t assert(0); // this path untested but needed for __uint128_t
@ -1329,23 +1354,28 @@ ST_FUNC void gen_va_arg(CType *t)
} }
o(0x310003c0 | r1 | n << 10); // adds w(r1),w30,#(n) o(0x310003c0 | r1 | n << 10); // adds w(r1),w30,#(n)
o(0x540000ad); // b.le .+20 o(0x540000ad); // b.le .+20
#endif
o(0xf9400000 | r1 | r0 << 5); // ldr x(r1),[x(r0)] // __stack o(0xf9400000 | r1 | r0 << 5); // ldr x(r1),[x(r0)] // __stack
o(0x9100001e | r1 << 5 | n << 10); // add x30,x(r1),#(n) o(0x9100001e | r1 << 5 | n << 10); // add x30,x(r1),#(n)
o(0xf900001e | r0 << 5); // str x30,[x(r0)] // __stack o(0xf900001e | r0 << 5); // str x30,[x(r0)] // __stack
#if !defined(__APPLE__)
o(0x14000004); // b .+16 o(0x14000004); // b .+16
o(0xb9001800 | r1 | r0 << 5); // str w(r1),[x(r0),#24] // __gr_offs o(0xb9001800 | r1 | r0 << 5); // str w(r1),[x(r0),#24] // __gr_offs
o(0xf9400400 | r1 | r0 << 5); // ldr x(r1),[x(r0),#8] // __gr_top o(0xf9400400 | r1 | r0 << 5); // ldr x(r1),[x(r0),#8] // __gr_top
o(0x8b3ec000 | r1 | r1 << 5); // add x(r1),x(r1),w30,sxtw o(0x8b3ec000 | r1 | r1 << 5); // add x(r1),x(r1),w30,sxtw
#endif
if (size > 16) if (size > 16)
o(0xf9400000 | r1 | r1 << 5); // ldr x(r1),[x(r1)] o(0xf9400000 | r1 | r1 << 5); // ldr x(r1),[x(r1)]
} }
else { else {
uint32_t rsz = hfa << 4;
uint32_t ssz = (size + 7) & -(uint32_t)8; uint32_t ssz = (size + 7) & -(uint32_t)8;
#if !defined(__APPLE__)
uint32_t rsz = hfa << 4;
uint32_t b1, b2; uint32_t b1, b2;
o(0xb9401c1e | r0 << 5); // ldr w30,[x(r0),#28] // __vr_offs o(0xb9401c1e | r0 << 5); // ldr w30,[x(r0),#28] // __vr_offs
o(0x310003c0 | r1 | rsz << 10); // adds w(r1),w30,#(rsz) o(0x310003c0 | r1 | rsz << 10); // adds w(r1),w30,#(rsz)
b1 = ind; o(0x5400000d); // b.le lab1 b1 = ind; o(0x5400000d); // b.le lab1
#endif
o(0xf9400000 | r1 | r0 << 5); // ldr x(r1),[x(r0)] // __stack o(0xf9400000 | r1 | r0 << 5); // ldr x(r1),[x(r0)] // __stack
if (fsize == 16) { if (fsize == 16) {
o(0x91003c00 | r1 | r1 << 5); // add x(r1),x(r1),#15 o(0x91003c00 | r1 | r1 << 5); // add x(r1),x(r1),#15
@ -1353,6 +1383,7 @@ ST_FUNC void gen_va_arg(CType *t)
} }
o(0x9100001e | r1 << 5 | ssz << 10); // add x30,x(r1),#(ssz) o(0x9100001e | r1 << 5 | ssz << 10); // add x30,x(r1),#(ssz)
o(0xf900001e | r0 << 5); // str x30,[x(r0)] // __stack o(0xf900001e | r0 << 5); // str x30,[x(r0)] // __stack
#if !defined(__APPLE__)
b2 = ind; o(0x14000000); // b lab2 b2 = ind; o(0x14000000); // b lab2
// lab1: // lab1:
write32le(cur_text_section->data + b1, 0x5400000d | (ind - b1) << 3); write32le(cur_text_section->data + b1, 0x5400000d | (ind - b1) << 3);
@ -1376,6 +1407,7 @@ ST_FUNC void gen_va_arg(CType *t)
} }
// lab2: // lab2:
write32le(cur_text_section->data + b2, 0x14000000 | (ind - b2) >> 2); write32le(cur_text_section->data + b2, 0x14000000 | (ind - b2) >> 2);
#endif
} }
} }
@ -1390,7 +1422,7 @@ ST_FUNC void gfunc_return(CType *func_type)
CType *t = func_type; CType *t = func_type;
unsigned long a; unsigned long a;
arm64_pcs(0, &t, &a); arm64_pcs(0, 0, &t, &a);
switch (a) { switch (a) {
case -1: case -1:
break; break;

View File

@ -207,11 +207,18 @@
&~3), *(type *)(ap - ((sizeof(type)+3)&~3))) &~3), *(type *)(ap - ((sizeof(type)+3)&~3)))
#elif defined __aarch64__ #elif defined __aarch64__
#if defined __APPLE__
typedef struct {
void *__stack;
} __builtin_va_list;
#else
typedef struct { typedef struct {
void *__stack, *__gr_top, *__vr_top; void *__stack, *__gr_top, *__vr_top;
int __gr_offs, __vr_offs; int __gr_offs, __vr_offs;
} __builtin_va_list; } __builtin_va_list;
#endif
#elif defined __riscv #elif defined __riscv
typedef char *__builtin_va_list; typedef char *__builtin_va_list;
#define __va_reg_size (__riscv_xlen >> 3) #define __va_reg_size (__riscv_xlen >> 3)

View File

@ -55,7 +55,7 @@ OBJ-x86_64-osx = $(X86_64_O) va_list.o $(BCHECK_O)
OBJ-i386-win32 = $(I386_O) chkstk.o $(B_O) $(WIN_O) OBJ-i386-win32 = $(I386_O) chkstk.o $(B_O) $(WIN_O)
OBJ-x86_64-win32 = $(X86_64_O) chkstk.o $(B_O) $(WIN_O) OBJ-x86_64-win32 = $(X86_64_O) chkstk.o $(B_O) $(WIN_O)
OBJ-arm64 = $(ARM64_O) $(BCHECK_O) $(DSO_O) OBJ-arm64 = $(ARM64_O) $(BCHECK_O) $(DSO_O)
OBJ-arm64-osx = $(ARM64_O) va_list.o OBJ-arm64-osx = $(ARM64_O) $(BCHECK_O)
OBJ-arm = $(ARM_O) $(BCHECK_O) $(DSO_O) OBJ-arm = $(ARM_O) $(BCHECK_O) $(DSO_O)
OBJ-arm-fpa = $(ARM_O) $(DSO_O) OBJ-arm-fpa = $(ARM_O) $(DSO_O)
OBJ-arm-fpa-ld = $(ARM_O) $(DSO_O) OBJ-arm-fpa-ld = $(ARM_O) $(DSO_O)

View File

@ -5,6 +5,13 @@
* __atomic_compare_exchange_4 * __atomic_compare_exchange_4
* __atomic_compare_exchange_8 * __atomic_compare_exchange_8
*/ */
#ifdef __leading_underscore
# define _(s) _##s
#else
# define _(s) s
#endif
#if defined __arm__ #if defined __arm__
#ifndef __TINYC__ #ifndef __TINYC__
@ -14,9 +21,9 @@
.text .text
.align 2 .align 2
.global __atomic_compare_exchange_1 .global _(__atomic_compare_exchange_1)
.type __atomic_compare_exchange_1, %function .type _(__atomic_compare_exchange_1), %function
__atomic_compare_exchange_1: _(__atomic_compare_exchange_1):
#ifdef __TINYC__ #ifdef __TINYC__
.int 0xe52de004 .int 0xe52de004
.int 0xe5d13000 .int 0xe5d13000
@ -50,11 +57,11 @@ __atomic_compare_exchange_1:
strbne ip, [r1] strbne ip, [r1]
ldr pc, [sp], #4 ldr pc, [sp], #4
#endif #endif
.size __atomic_compare_exchange_1, .-__atomic_compare_exchange_1 .size _(__atomic_compare_exchange_1), .-_(__atomic_compare_exchange_1)
.global __atomic_compare_exchange_2 .global _(__atomic_compare_exchange_2)
.type __atomic_compare_exchange_2, %function .type _(__atomic_compare_exchange_2), %function
__atomic_compare_exchange_2: _(__atomic_compare_exchange_2):
#ifdef __TINYC__ #ifdef __TINYC__
.int 0xe52de004 .int 0xe52de004
.int 0xe1d130b0 .int 0xe1d130b0
@ -88,11 +95,11 @@ __atomic_compare_exchange_2:
strhne ip, [r1] strhne ip, [r1]
ldr pc, [sp], #4 ldr pc, [sp], #4
#endif #endif
.size __atomic_compare_exchange_2, .-__atomic_compare_exchange_2 .size _(__atomic_compare_exchange_2), .-_(__atomic_compare_exchange_2)
.global __atomic_compare_exchange_4 .global _(__atomic_compare_exchange_4)
.type __atomic_compare_exchange_4, %function .type _(__atomic_compare_exchange_4), %function
__atomic_compare_exchange_4: _(__atomic_compare_exchange_4):
#ifdef __TINYC__ #ifdef __TINYC__
.int 0xe52de004 .int 0xe52de004
.int 0xe5913000 .int 0xe5913000
@ -126,7 +133,7 @@ __atomic_compare_exchange_4:
strne ip, [r1] strne ip, [r1]
ldr pc, [sp], #4 ldr pc, [sp], #4
#endif #endif
.size __atomic_compare_exchange_4, .-__atomic_compare_exchange_4 .size _(__atomic_compare_exchange_4), .-_(__atomic_compare_exchange_4)
/* ---------------------------------------------- */ /* ---------------------------------------------- */
#elif defined __aarch64__ #elif defined __aarch64__
@ -134,9 +141,9 @@ __atomic_compare_exchange_4:
.text .text
.align 2 .align 2
.global __atomic_compare_exchange_1 .global _(__atomic_compare_exchange_1)
.type __atomic_compare_exchange_1, %function .type _(__atomic_compare_exchange_1), %function
__atomic_compare_exchange_1: _(__atomic_compare_exchange_1):
#ifdef __TINYC__ #ifdef __TINYC__
.int 0xa9be7bfd .int 0xa9be7bfd
.int 0x910003fd .int 0x910003fd
@ -187,11 +194,11 @@ __atomic_compare_exchange_1:
ldp x29, x30, [sp], 32 ldp x29, x30, [sp], 32
ret ret
#endif #endif
.size __atomic_compare_exchange_1, .-__atomic_compare_exchange_1 .size _(__atomic_compare_exchange_1), .-_(__atomic_compare_exchange_1)
.global __atomic_compare_exchange_2 .global _(__atomic_compare_exchange_2)
.type __atomic_compare_exchange_2, %function .type _(__atomic_compare_exchange_2), %function
__atomic_compare_exchange_2: _(__atomic_compare_exchange_2):
#ifdef __TINYC__ #ifdef __TINYC__
.int 0xa9be7bfd .int 0xa9be7bfd
.int 0x910003fd .int 0x910003fd
@ -242,11 +249,11 @@ __atomic_compare_exchange_2:
ldp x29, x30, [sp], 32 ldp x29, x30, [sp], 32
ret ret
#endif #endif
.size __atomic_compare_exchange_2, .-__atomic_compare_exchange_2 .size _(__atomic_compare_exchange_2), .-_(__atomic_compare_exchange_2)
.global __atomic_compare_exchange_4 .global _(__atomic_compare_exchange_4)
.type __atomic_compare_exchange_4, %function .type _(__atomic_compare_exchange_4), %function
__atomic_compare_exchange_4: _(__atomic_compare_exchange_4):
#ifdef __TINYC__ #ifdef __TINYC__
.int 0xa9be7bfd .int 0xa9be7bfd
.int 0x910003fd .int 0x910003fd
@ -297,11 +304,11 @@ __atomic_compare_exchange_4:
ldp x29, x30, [sp], 32 ldp x29, x30, [sp], 32
ret ret
#endif #endif
.size __atomic_compare_exchange_4, .-__atomic_compare_exchange_4 .size _(__atomic_compare_exchange_4), .-_(__atomic_compare_exchange_4)
.global __atomic_compare_exchange_8 .global _(__atomic_compare_exchange_8)
.type __atomic_compare_exchange_8, %function .type _(__atomic_compare_exchange_8), %function
__atomic_compare_exchange_8: _(__atomic_compare_exchange_8):
#ifdef __TINYC__ #ifdef __TINYC__
.int 0xa9be7bfd .int 0xa9be7bfd
.int 0x910003fd .int 0x910003fd
@ -352,7 +359,7 @@ __atomic_compare_exchange_8:
ldp x29, x30, [sp], 32 ldp x29, x30, [sp], 32
ret ret
#endif #endif
.size __atomic_compare_exchange_8, .-__atomic_compare_exchange_8 .size _(__atomic_compare_exchange_8), .-_(__atomic_compare_exchange_8)
/* ---------------------------------------------- */ /* ---------------------------------------------- */
#elif defined __riscv #elif defined __riscv
@ -360,9 +367,9 @@ __atomic_compare_exchange_8:
.text .text
.align 2 .align 2
.global __atomic_compare_exchange_1 .global _(__atomic_compare_exchange_1)
.type __atomic_compare_exchange_1, %function .type _(__atomic_compare_exchange_1), %function
__atomic_compare_exchange_1: _(__atomic_compare_exchange_1):
#ifdef __TINYC__ #ifdef __TINYC__
.short 0x1141 .short 0x1141
.short 0x86ba .short 0x86ba
@ -443,11 +450,11 @@ __atomic_compare_exchange_1:
addi sp,sp,16 addi sp,sp,16
jr ra jr ra
#endif #endif
.size __atomic_compare_exchange_1, .-__atomic_compare_exchange_1 .size _(__atomic_compare_exchange_1), .-_(__atomic_compare_exchange_1)
.global __atomic_compare_exchange_2 .global _(__atomic_compare_exchange_2)
.type __atomic_compare_exchange_2, %function .type _(__atomic_compare_exchange_2), %function
__atomic_compare_exchange_2: _(__atomic_compare_exchange_2):
#ifdef __TINYC__ #ifdef __TINYC__
.short 0x1141 .short 0x1141
.short 0x86ba .short 0x86ba
@ -530,11 +537,11 @@ __atomic_compare_exchange_2:
addi sp,sp,16 addi sp,sp,16
jr ra jr ra
#endif #endif
.size __atomic_compare_exchange_2, .-__atomic_compare_exchange_2 .size _(__atomic_compare_exchange_2), .-_(__atomic_compare_exchange_2)
.global __atomic_compare_exchange_4 .global _(__atomic_compare_exchange_4)
.type __atomic_compare_exchange_4, %function .type _(__atomic_compare_exchange_4), %function
__atomic_compare_exchange_4: _(__atomic_compare_exchange_4):
#ifdef __TINYC__ #ifdef __TINYC__
.short 0x419c .short 0x419c
.int 0x0f50000f .int 0x0f50000f
@ -565,11 +572,11 @@ __atomic_compare_exchange_4:
andi a0,a0,1 andi a0,a0,1
ret ret
#endif #endif
.size __atomic_compare_exchange_4, .-__atomic_compare_exchange_4 .size _(__atomic_compare_exchange_4), .-_(__atomic_compare_exchange_4)
.global __atomic_compare_exchange_8 .global _(__atomic_compare_exchange_8)
.type __atomic_compare_exchange_8, %function .type _(__atomic_compare_exchange_8), %function
__atomic_compare_exchange_8: _(__atomic_compare_exchange_8):
#ifdef __TINYC__ #ifdef __TINYC__
.short 0x619c .short 0x619c
.int 0x0f50000f .int 0x0f50000f
@ -600,7 +607,7 @@ __atomic_compare_exchange_8:
andi a0,a0,1 andi a0,a0,1
ret ret
#endif #endif
.size __atomic_compare_exchange_8, .-__atomic_compare_exchange_8 .size _(__atomic_compare_exchange_8), .-_(__atomic_compare_exchange_8)
/* ---------------------------------------------- */ /* ---------------------------------------------- */
#endif #endif

View File

@ -1,11 +1,20 @@
/* ---------------------------------------------- */ /* ---------------------------------------------- */
#ifdef __leading_underscore
# define _(s) _##s
#else
# define _(s) s
#endif
.globl _(__bound_alloca)
_(__bound_alloca):
#if defined __arm__ #if defined __arm__
.text .text
.align 2 .align 2
.global fetch_and_add_arm .global _(fetch_and_add_arm)
.type fetch_and_add_arm, %function .type _(fetch_and_add_arm), %function
fetch_and_add_arm: _(fetch_and_add_arm):
mcr p15, #0, r0, c7, c10, #5 mcr p15, #0, r0, c7, c10, #5
.L0: .L0:
ldrex r3, [r0] ldrex r3, [r0]
@ -15,16 +24,16 @@ fetch_and_add_arm:
bne .L0 bne .L0
mcr p15, #0, r0, c7, c10, #5 mcr p15, #0, r0, c7, c10, #5
bx lr bx lr
.size fetch_and_add_arm, .-fetch_and_add_arm .size _(fetch_and_add_arm), .-_(fetch_and_add_arm)
/* ---------------------------------------------- */ /* ---------------------------------------------- */
#elif defined __aarch64__ #elif defined __aarch64__
.text .text
.align 2 .align 2
.global fetch_and_add_arm64 .global _(fetch_and_add_arm64)
.type fetch_and_add_arm64, %function .type _(fetch_and_add_arm64), %function
fetch_and_add_arm64: _(fetch_and_add_arm64):
#ifdef __TINYC__ #ifdef __TINYC__
.int 0x885f7c02 .int 0x885f7c02
.int 0x0b010042 .int 0x0b010042
@ -36,21 +45,21 @@ fetch_and_add_arm64:
ldxr w2, [x0] ldxr w2, [x0]
add w2, w2, w1 add w2, w2, w1
stlxr w3, w2, [x0] stlxr w3, w2, [x0]
cbnz w3, fetch_and_add_arm64 cbnz w3, _(fetch_and_add_arm64)
dmb ish dmb ish
ret ret
#endif #endif
.size fetch_and_add_arm64, .-fetch_and_add_arm64 .size _(fetch_and_add_arm64), .-_(fetch_and_add_arm64)
/* ---------------------------------------------- */ /* ---------------------------------------------- */
#elif defined __riscv #elif defined __riscv
.text .text
.align 2 .align 2
.global fetch_and_add_riscv64 .global _(fetch_and_add_riscv64)
.type fetch_and_add_riscv64, %function .type _(fetch_and_add_riscv64), %function
fetch_and_add_riscv64: _(fetch_and_add_riscv64):
#ifdef __TINYC__ #ifdef __TINYC__
.int 0x0f50000f .int 0x0f50000f
.int 0x004b5202f .int 0x004b5202f
@ -61,7 +70,7 @@ fetch_and_add_riscv64:
ret ret
#endif #endif
.size fetch_and_add_riscv64, .-fetch_and_add_riscv64 .size _(fetch_and_add_riscv64), .-_(fetch_and_add_riscv64)
/* ---------------------------------------------- */ /* ---------------------------------------------- */
#endif #endif

View File

@ -24,7 +24,7 @@ void *memcpy(void*,void*,__SIZE_TYPE__);
#include <string.h> #include <string.h>
#endif #endif
#ifndef __riscv #if !defined __riscv && !defined __APPLE__
void __clear_cache(void *beg, void *end) void __clear_cache(void *beg, void *end)
{ {
__arm64_clear_cache(beg, end); __arm64_clear_cache(beg, end);

View File

@ -1487,7 +1487,7 @@ static void add_init_array_defines(TCCState *s1, const char *section_name)
addr_t end_offset; addr_t end_offset;
char buf[1024]; char buf[1024];
s = find_section_create(s1, section_name, 0); s = find_section_create(s1, section_name, 0);
if (!s) { if (!s || !(s->sh_flags & SHF_ALLOC)) {
end_offset = 0; end_offset = 0;
s = data_section; s = data_section;
} else { } else {

View File

@ -31,9 +31,28 @@
to setup our own crt code. We're not using lazy linking, so even function to setup our own crt code. We're not using lazy linking, so even function
calls are resolved at startup. */ calls are resolved at startup. */
#if !defined TCC_TARGET_X86_64 && !defined TCC_TARGET_ARM64
#error Platform not supported
#endif
#define DEBUG_MACHO 0 #define DEBUG_MACHO 0
#define dprintf if (DEBUG_MACHO) printf #define dprintf if (DEBUG_MACHO) printf
#define MH_EXECUTE (0x2)
#define MH_DYLDLINK (0x4)
#define MH_PIE (0x200000)
#define CPU_SUBTYPE_LIB64 (0x80000000)
#define CPU_SUBTYPE_X86_ALL (3)
#define CPU_SUBTYPE_ARM64_ALL (0)
#define CPU_ARCH_ABI64 (0x01000000)
#define CPU_TYPE_X86 (7)
#define CPU_TYPE_X86_64 (CPU_TYPE_X86 | CPU_ARCH_ABI64)
#define CPU_TYPE_ARM (12)
#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64)
struct fat_header { struct fat_header {
uint32_t magic; /* FAT_MAGIC or FAT_MAGIC_64 */ uint32_t magic; /* FAT_MAGIC or FAT_MAGIC_64 */
uint32_t nfat_arch; /* number of structs that follow */ uint32_t nfat_arch; /* number of structs that follow */
@ -86,6 +105,7 @@ struct load_command {
#define LC_LOAD_DYLINKER 0xe #define LC_LOAD_DYLINKER 0xe
#define LC_SEGMENT_64 0x19 #define LC_SEGMENT_64 0x19
#define LC_REEXPORT_DYLIB (0x1f | LC_REQ_DYLD) #define LC_REEXPORT_DYLIB (0x1f | LC_REQ_DYLD)
#define LC_DYLD_INFO_ONLY (0x22|LC_REQ_DYLD)
#define LC_MAIN (0x28|LC_REQ_DYLD) #define LC_MAIN (0x28|LC_REQ_DYLD)
typedef int vm_prot_t; typedef int vm_prot_t;
@ -122,6 +142,8 @@ struct section_64 { /* for 64-bit architectures */
#define S_REGULAR 0x0 #define S_REGULAR 0x0
#define S_ZEROFILL 0x1 #define S_ZEROFILL 0x1
#define S_NON_LAZY_SYMBOL_POINTERS 0x6 #define S_NON_LAZY_SYMBOL_POINTERS 0x6
#define S_LAZY_SYMBOL_POINTERS 0x7
#define S_SYMBOL_STUBS 0x8
#define S_MOD_INIT_FUNC_POINTERS 0x9 #define S_MOD_INIT_FUNC_POINTERS 0x9
#define S_MOD_TERM_FUNC_POINTERS 0xa #define S_MOD_TERM_FUNC_POINTERS 0xa
@ -187,6 +209,38 @@ struct dysymtab_command {
uint32_t nlocrel; /* number of local relocation entries */ uint32_t nlocrel; /* number of local relocation entries */
}; };
#define BIND_OPCODE_DONE 0x00
#define BIND_OPCODE_SET_DYLIB_SPECIAL_IMM 0x30
#define BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM 0x40
#define BIND_OPCODE_SET_TYPE_IMM 0x50
#define BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB 0x70
#define BIND_OPCODE_DO_BIND 0x90
#define BIND_TYPE_POINTER 1
#define BIND_SPECIAL_DYLIB_FLAT_LOOKUP -2
#define REBASE_OPCODE_DONE 0x00
#define REBASE_OPCODE_SET_TYPE_IMM 0x10
#define REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB 0x20
#define REBASE_OPCODE_DO_REBASE_IMM_TIMES 0x50
#define REBASE_TYPE_POINTER 1
struct dyld_info_command {
uint32_t cmd; /* LC_DYLD_INFO or LC_DYLD_INFO_ONLY */
uint32_t cmdsize; /* sizeof(struct dyld_info_command) */
uint32_t rebase_off; /* file offset to rebase info */
uint32_t rebase_size; /* size of rebase info */
uint32_t bind_off; /* file offset to binding info */
uint32_t bind_size; /* size of binding info */
uint32_t weak_bind_off; /* file offset to weak binding info */
uint32_t weak_bind_size; /* size of weak binding info */
uint32_t lazy_bind_off; /* file offset to lazy binding info */
uint32_t lazy_bind_size; /* size of lazy binding info */
uint32_t export_off; /* file offset to export info */
uint32_t export_size; /* size of export info */
};
#define INDIRECT_SYMBOL_LOCAL 0x80000000 #define INDIRECT_SYMBOL_LOCAL 0x80000000
struct entry_point_command { struct entry_point_command {
@ -201,6 +255,7 @@ enum skind {
sk_discard, sk_discard,
sk_text, sk_text,
sk_stubs, sk_stubs,
sk_stub_helper,
sk_ro_data, sk_ro_data,
sk_uw_info, sk_uw_info,
sk_nl_ptr, // non-lazy pointers, aka GOT sk_nl_ptr, // non-lazy pointers, aka GOT
@ -208,6 +263,14 @@ enum skind {
sk_init, sk_init,
sk_fini, sk_fini,
sk_rw_data, sk_rw_data,
sk_stab,
sk_stab_str,
sk_debug_info,
sk_debug_abbrev,
sk_debug_line,
sk_debug_aranges,
sk_debug_str,
sk_debug_line_str,
sk_bss, sk_bss,
sk_linkedit, sk_linkedit,
sk_last sk_last
@ -241,9 +304,26 @@ struct macho {
} sk_to_sect[sk_last]; } sk_to_sect[sk_last];
int *elfsectomacho; int *elfsectomacho;
int *e2msym; int *e2msym;
Section *symtab, *strtab, *wdata, *indirsyms, *stubs; Section *rebase, *binding, *lazy_binding, *exports;
int stubsym; Section *symtab, *strtab, *wdata, *indirsyms;
Section *stubs, *stub_helper, *la_symbol_ptr;
int nr_plt, n_got;
struct dyld_info_command *dyldinfo;
int stubsym, helpsym, lasym, dyld_private, dyld_stub_binder;
uint32_t ilocal, iextdef, iundef; uint32_t ilocal, iextdef, iundef;
int n_lazy_bind_rebase;
struct lazy_bind_rebase {
int section;
int bind;
int bind_offset;
int la_symbol_offset;
ElfW_Rel rel;
} *lazy_bind_rebase;
int n_bind;
struct bind {
int section;
ElfW_Rel rel;
} *bind;
}; };
#define SHT_LINKEDIT (SHT_LOOS + 42) #define SHT_LINKEDIT (SHT_LOOS + 42)
@ -306,20 +386,165 @@ static void * add_dylib(struct macho *mo, char *name)
return lc; return lc;
} }
static void write_uleb128(Section *section, uint64_t value)
{
do {
unsigned char byte = value & 0x7f;
uint8_t *ptr = section_ptr_add(section, 1);
value >>= 7;
*ptr = byte | (value ? 0x80 : 0);
} while (value != 0);
}
static void tcc_macho_add_destructor(TCCState *s1)
{
int init_sym, mh_execute_header, at_exit_sym;
Section *s;
ElfW_Rel *rel;
uint8_t *ptr;
s = find_section(s1, ".fini_array");
if (s->data_offset == 0)
return;
init_sym = put_elf_sym(s1->symtab, text_section->data_offset, 0,
ELFW(ST_INFO)(STB_LOCAL, STT_FUNC), 0,
text_section->sh_num, "___GLOBAL_init_65535");
mh_execute_header = put_elf_sym(s1->symtab, 0x100000000ll, 0,
ELFW(ST_INFO)(STB_LOCAL, STT_OBJECT), 0,
SHN_ABS, "__mh_execute_header");
at_exit_sym = put_elf_sym(s1->symtab, 0, 0,
ELFW(ST_INFO)(STB_GLOBAL, STT_FUNC), 0,
SHN_UNDEF, "___cxa_atexit");
#ifdef TCC_TARGET_X86_64
ptr = section_ptr_add(text_section, 4);
ptr[0] = 0x55; // pushq %rbp
ptr[1] = 0x48; // movq %rsp, %rbp
ptr[2] = 0x89;
ptr[3] = 0xe5;
for_each_elem(s->reloc, 0, rel, ElfW_Rel) {
int sym_index = ELFW(R_SYM)(rel->r_info);
ptr = section_ptr_add(text_section, 26);
ptr[0] = 0x48; // lea destructor(%rip),%rax
ptr[1] = 0x8d;
ptr[2] = 0x05;
put_elf_reloca(s1->symtab, text_section,
text_section->data_offset - 23,
R_X86_64_PC32, sym_index, -4);
ptr[7] = 0x48; // mov %rax,%rdi
ptr[8] = 0x89;
ptr[9] = 0xc7;
ptr[10] = 0x31; // xorl %ecx, %ecx
ptr[11] = 0xc9;
ptr[12] = 0x89; // movl %ecx, %esi
ptr[13] = 0xce;
ptr[14] = 0x48; // lea mh_execute_header(%rip),%rdx
ptr[15] = 0x8d;
ptr[16] = 0x15;
put_elf_reloca(s1->symtab, text_section,
text_section->data_offset - 9,
R_X86_64_PC32, mh_execute_header, -4);
ptr[21] = 0xe8; // call __cxa_atexit
put_elf_reloca(s1->symtab, text_section,
text_section->data_offset - 4,
R_X86_64_PLT32, at_exit_sym, -4);
}
ptr = section_ptr_add(text_section, 2);
ptr[0] = 0x5d; // pop %rbp
ptr[1] = 0xc3; // ret
#elif defined TCC_TARGET_ARM64
ptr = section_ptr_add(text_section, 8);
write32le(ptr, 0xa9bf7bfd); // stp x29, x30, [sp, #-16]!
write32le(ptr + 4, 0x910003fd); // mov x29, sp
for_each_elem(s->reloc, 0, rel, ElfW_Rel) {
int sym_index = ELFW(R_SYM)(rel->r_info);
ptr = section_ptr_add(text_section, 24);
put_elf_reloc(s1->symtab, text_section,
text_section->data_offset - 24,
R_AARCH64_ADR_PREL_PG_HI21, sym_index);
write32le(ptr, 0x90000000); // adrp x0, destructor@page
put_elf_reloc(s1->symtab, text_section,
text_section->data_offset - 20,
R_AARCH64_LDST8_ABS_LO12_NC, sym_index);
write32le(ptr + 4, 0x91000000); // add x0,x0,destructor@pageoff
write32le(ptr + 8, 0xd2800001); // mov x1, #0
put_elf_reloc(s1->symtab, text_section,
text_section->data_offset - 12,
R_AARCH64_ADR_PREL_PG_HI21, mh_execute_header);
write32le(ptr + 12, 0x90000002); // adrp x2, mh_execute_header@page
put_elf_reloc(s1->symtab, text_section,
text_section->data_offset - 8,
R_AARCH64_LDST8_ABS_LO12_NC, mh_execute_header);
write32le(ptr + 16, 0x91000042); // add x2,x2,mh_execute_header@pageoff
put_elf_reloc(s1->symtab, text_section,
text_section->data_offset - 4,
R_AARCH64_CALL26, at_exit_sym);
write32le(ptr + 20, 0x94000000); // bl __cxa_atexit
}
ptr = section_ptr_add(text_section, 8);
write32le(ptr, 0xa8c17bfd); // ldp x29, x30, [sp], #16
write32le(ptr + 4, 0xd65f03c0); // ret
#endif
s->reloc->data_offset = s->data_offset = 0;
s->sh_flags &= ~SHF_ALLOC;
add_array (s1, ".init_array", init_sym);
}
static void check_relocs(TCCState *s1, struct macho *mo) static void check_relocs(TCCState *s1, struct macho *mo)
{ {
uint8_t *jmp;
Section *s; Section *s;
ElfW_Rel *rel; ElfW_Rel *rel;
ElfW(Sym) *sym; ElfW(Sym) *sym;
int i, type, gotplt_entry, sym_index, for_code; int i, type, gotplt_entry, sym_index, for_code;
int sh_num, debug, bind_offset, la_symbol_offset;
uint32_t *pi, *goti;
struct sym_attr *attr; struct sym_attr *attr;
s1->got = new_section(s1, ".got", SHT_PROGBITS, SHF_ALLOC | SHF_WRITE);
mo->indirsyms = new_section(s1, "LEINDIR", SHT_LINKEDIT, SHF_ALLOC | SHF_WRITE); mo->indirsyms = new_section(s1, "LEINDIR", SHT_LINKEDIT, SHF_ALLOC | SHF_WRITE);
#ifdef TCC_TARGET_X86_64
jmp = section_ptr_add(mo->stub_helper, 16);
jmp[0] = 0x4c; /* leaq _dyld_private(%rip), %r11 */
jmp[1] = 0x8d;
jmp[2] = 0x1d;
put_elf_reloca(s1->symtab, mo->stub_helper, 3,
R_X86_64_PC32, mo->dyld_private, -4);
jmp[7] = 0x41; /* pushq %r11 */
jmp[8] = 0x53;
jmp[9] = 0xff; /* jmpq *dyld_stub_binder@GOT(%rip) */
jmp[10] = 0x25;
put_elf_reloca(s1->symtab, mo->stub_helper, 11,
R_X86_64_GOTPCREL, mo->dyld_stub_binder, -4);
jmp[15] = 0x90; /* nop */
#elif defined TCC_TARGET_ARM64
jmp = section_ptr_add(mo->stub_helper, 24);
put_elf_reloc(s1->symtab, mo->stub_helper, 0,
R_AARCH64_ADR_PREL_PG_HI21, mo->dyld_private);
write32le(jmp, 0x90000011); // adrp x17, _dyld_private@page
put_elf_reloc(s1->symtab, mo->stub_helper, 4,
R_AARCH64_LDST64_ABS_LO12_NC, mo->dyld_private);
write32le(jmp + 4, 0x91000231); // add x17,x17,_dyld_private@pageoff
write32le(jmp + 8, 0xa9bf47f0); // stp x16/x17, [sp, #-16]!
put_elf_reloc(s1->symtab, mo->stub_helper, 12,
R_AARCH64_ADR_GOT_PAGE, mo->dyld_stub_binder);
write32le(jmp + 12, 0x90000010); // adrp x16, dyld_stub_binder@page
put_elf_reloc(s1->symtab, mo->stub_helper, 16,
R_AARCH64_LD64_GOT_LO12_NC, mo->dyld_stub_binder);
write32le(jmp + 16, 0xf9400210); // ldr x16,[x16,dyld_stub_binder@pageoff]
write32le(jmp + 20, 0xd61f0200); // br x16
#endif
goti = NULL;
mo->nr_plt = mo->n_got = 0;
for (i = 1; i < s1->nb_sections; i++) { for (i = 1; i < s1->nb_sections; i++) {
s = s1->sections[i]; s = s1->sections[i];
if (s->sh_type != SHT_RELX) if (s->sh_type != SHT_RELX)
continue; continue;
sh_num = s1->sections[s->sh_info]->sh_num;
debug = sh_num >= s1->dwlo && sh_num < s1->dwhi;
for_each_elem(s, 0, rel, ElfW_Rel) { for_each_elem(s, 0, rel, ElfW_Rel) {
type = ELFW(R_TYPE)(rel->r_info); type = ELFW(R_TYPE)(rel->r_info);
gotplt_entry = gotplt_entry_type(type); gotplt_entry = gotplt_entry_type(type);
@ -329,44 +554,150 @@ static void check_relocs(TCCState *s1, struct macho *mo)
address due to codegen (i.e. a reloc requiring a got slot). */ address due to codegen (i.e. a reloc requiring a got slot). */
sym_index = ELFW(R_SYM)(rel->r_info); sym_index = ELFW(R_SYM)(rel->r_info);
sym = &((ElfW(Sym) *)symtab_section->data)[sym_index]; sym = &((ElfW(Sym) *)symtab_section->data)[sym_index];
if (sym->st_shndx == SHN_UNDEF if (!debug &&
|| gotplt_entry == ALWAYS_GOTPLT_ENTRY) { (sym->st_shndx == SHN_UNDEF
|| gotplt_entry == ALWAYS_GOTPLT_ENTRY)) {
attr = get_sym_attr(s1, sym_index, 1); attr = get_sym_attr(s1, sym_index, 1);
if (!attr->dyn_index) { if (!attr->dyn_index) {
uint32_t *pi = section_ptr_add(mo->indirsyms, sizeof(*pi));
attr->got_offset = s1->got->data_offset; attr->got_offset = s1->got->data_offset;
attr->plt_offset = -1; attr->plt_offset = -1;
attr->dyn_index = 1; /* used as flag */ attr->dyn_index = 1; /* used as flag */
section_ptr_add(s1->got, PTR_SIZE); section_ptr_add(s1->got, PTR_SIZE);
put_elf_reloc(s1->symtab, s1->got, attr->got_offset,
R_DATA_PTR, sym_index);
goti = tcc_realloc(goti, (mo->n_got + 1) * sizeof(*goti));
if (ELFW(ST_BIND)(sym->st_info) == STB_LOCAL) { if (ELFW(ST_BIND)(sym->st_info) == STB_LOCAL) {
if (sym->st_shndx == SHN_UNDEF) if (sym->st_shndx == SHN_UNDEF)
tcc_error("undefined local symbol???"); tcc_error("undefined local symbo: '%s'",
*pi = INDIRECT_SYMBOL_LOCAL; (char *) symtab_section->link->data + sym->st_name);
/* The pointer slot we generated must point to the goti[mo->n_got++] = INDIRECT_SYMBOL_LOCAL;
symbol, whose address is only known after layout, } else {
so register a simple relocation for that. */ goti[mo->n_got++] = mo->e2msym[sym_index];
put_elf_reloc(s1->symtab, s1->got, attr->got_offset, if (sym->st_shndx == SHN_UNDEF
R_DATA_PTR, sym_index); #ifdef TCC_TARGET_X86_64
} else && type == R_X86_64_GOTPCREL
*pi = mo->e2msym[sym_index]; #elif defined TCC_TARGET_ARM64
&& type == R_AARCH64_ADR_GOT_PAGE
#endif
) {
mo->bind =
tcc_realloc(mo->bind,
(mo->n_bind + 1) *
sizeof(struct bind));
mo->bind[mo->n_bind].section = s1->got->reloc->sh_info;
mo->bind[mo->n_bind].rel = *rel;
mo->bind[mo->n_bind].rel.r_offset = attr->got_offset;
mo->n_bind++;
s1->got->reloc->data_offset -= sizeof (ElfW_Rel);
}
}
} }
if (for_code) { if (for_code && sym->st_shndx == SHN_UNDEF) {
if (attr->plt_offset == -1) { if (attr->plt_offset == -1) {
uint8_t *jmp; pi = section_ptr_add(mo->indirsyms, sizeof(*pi));
*pi = mo->e2msym[sym_index];
mo->nr_plt++;
attr->plt_offset = mo->stubs->data_offset; attr->plt_offset = mo->stubs->data_offset;
#ifdef TCC_TARGET_X86_64
if (type != R_X86_64_PLT32)
continue;
/* __stubs */
jmp = section_ptr_add(mo->stubs, 6); jmp = section_ptr_add(mo->stubs, 6);
jmp[0] = 0xff; /* jmpq *ofs(%rip) */ jmp[0] = 0xff; /* jmpq *__la_symbol_ptr(%rip) */
jmp[1] = 0x25; jmp[1] = 0x25;
put_elf_reloc(s1->symtab, mo->stubs, put_elf_reloca(s1->symtab, mo->stubs,
attr->plt_offset + 2, mo->stubs->data_offset - 4,
R_X86_64_GOTPCREL, sym_index); R_X86_64_PC32, mo->lasym,
mo->la_symbol_ptr->data_offset - 4);
/* __stub_helper */
bind_offset = mo->stub_helper->data_offset + 1;
jmp = section_ptr_add(mo->stub_helper, 10);
jmp[0] = 0x68; /* pushq $bind_offset */
jmp[5] = 0xe9; /* jmpq __stub_helper */
write32le(jmp + 6, -mo->stub_helper->data_offset);
/* __la_symbol_ptr */
la_symbol_offset = mo->la_symbol_ptr->data_offset;
put_elf_reloca(s1->symtab, mo->la_symbol_ptr,
mo->la_symbol_ptr->data_offset,
R_DATA_PTR, mo->helpsym,
mo->stub_helper->data_offset - 10);
jmp = section_ptr_add(mo->la_symbol_ptr, PTR_SIZE);
#elif defined TCC_TARGET_ARM64
if (type != R_AARCH64_CALL26)
continue;
/* __stubs */
jmp = section_ptr_add(mo->stubs, 12);
put_elf_reloca(s1->symtab, mo->stubs,
mo->stubs->data_offset - 12,
R_AARCH64_ADR_PREL_PG_HI21, mo->lasym,
mo->la_symbol_ptr->data_offset);
write32le(jmp, // adrp x16, __la_symbol_ptr@page
0x90000010);
put_elf_reloca(s1->symtab, mo->stubs,
mo->stubs->data_offset - 8,
R_AARCH64_LDST64_ABS_LO12_NC, mo->lasym,
mo->la_symbol_ptr->data_offset);
write32le(jmp + 4, // ldr x16,[x16, __la_symbol_ptr@pageoff]
0xf9400210);
write32le(jmp + 8, // br x16
0xd61f0200);
/* __stub_helper */
bind_offset = mo->stub_helper->data_offset + 8;
jmp = section_ptr_add(mo->stub_helper, 12);
write32le(jmp + 0, // ldr w16, l0
0x18000050);
write32le(jmp + 4, // b stubHelperHeader
0x14000000 +
((-(mo->stub_helper->data_offset - 8) / 4) &
0x3ffffff));
write32le(jmp + 8, 0); // l0: .long bind_offset
/* __la_symbol_ptr */
la_symbol_offset = mo->la_symbol_ptr->data_offset;
put_elf_reloca(s1->symtab, mo->la_symbol_ptr,
mo->la_symbol_ptr->data_offset,
R_DATA_PTR, mo->helpsym,
mo->stub_helper->data_offset - 12);
jmp = section_ptr_add(mo->la_symbol_ptr, PTR_SIZE);
#endif
mo->lazy_bind_rebase =
tcc_realloc(mo->lazy_bind_rebase,
(mo->n_lazy_bind_rebase + 1) *
sizeof(struct lazy_bind_rebase));
mo->lazy_bind_rebase[mo->n_lazy_bind_rebase].section =
mo->stub_helper->reloc->sh_info;
mo->lazy_bind_rebase[mo->n_lazy_bind_rebase].bind = 1;
mo->lazy_bind_rebase[mo->n_lazy_bind_rebase].bind_offset = bind_offset;
mo->lazy_bind_rebase[mo->n_lazy_bind_rebase].la_symbol_offset = la_symbol_offset;
mo->lazy_bind_rebase[mo->n_lazy_bind_rebase].rel = *rel;
mo->lazy_bind_rebase[mo->n_lazy_bind_rebase].rel.r_offset =
attr->plt_offset;
mo->n_lazy_bind_rebase++;
} }
rel->r_info = ELFW(R_INFO)(mo->stubsym, type); rel->r_info = ELFW(R_INFO)(mo->stubsym, type);
rel->r_addend += attr->plt_offset; rel->r_addend += attr->plt_offset;
} }
} }
if (type == R_DATA_PTR) {
mo->lazy_bind_rebase =
tcc_realloc(mo->lazy_bind_rebase,
(mo->n_lazy_bind_rebase + 1) *
sizeof(struct lazy_bind_rebase));
mo->lazy_bind_rebase[mo->n_lazy_bind_rebase].section = s->sh_info;
mo->lazy_bind_rebase[mo->n_lazy_bind_rebase].bind = 0;
mo->lazy_bind_rebase[mo->n_lazy_bind_rebase].rel = *rel;
mo->n_lazy_bind_rebase++;
}
} }
} }
pi = section_ptr_add(mo->indirsyms, mo->n_got * sizeof(*pi));
memcpy(pi, goti, mo->n_got * sizeof(*pi));
pi = section_ptr_add(mo->indirsyms, mo->nr_plt * sizeof(*pi));
memcpy(pi, mo->indirsyms->data, mo->nr_plt * sizeof(*pi));
tcc_free(goti);
} }
static int check_symbols(TCCState *s1, struct macho *mo) static int check_symbols(TCCState *s1, struct macho *mo)
@ -444,9 +775,12 @@ static void convert_symbol(TCCState *s1, struct macho *mo, struct nlist_64 *pn)
n.n_type = N_ABS, n.n_sect = 0; n.n_type = N_ABS, n.n_sect = 0;
else if (sym->st_shndx >= SHN_LORESERVE) else if (sym->st_shndx >= SHN_LORESERVE)
tcc_error("unhandled ELF symbol section %d %s", sym->st_shndx, name); tcc_error("unhandled ELF symbol section %d %s", sym->st_shndx, name);
else if (!mo->elfsectomacho[sym->st_shndx]) else if (!mo->elfsectomacho[sym->st_shndx]) {
tcc_error("ELF section %d not mapped into Mach-O for symbol %s", int sh_num = s1->sections[sym->st_shndx]->sh_num;
sym->st_shndx, name); if (sh_num < s1->dwlo || sh_num >= s1->dwhi)
tcc_error("ELF section %d(%s) not mapped into Mach-O for symbol %s",
sym->st_shndx, s1->sections[sym->st_shndx]->name, name);
}
else else
n.n_sect = mo->elfsectomacho[sym->st_shndx]; n.n_sect = mo->elfsectomacho[sym->st_shndx];
if (ELFW(ST_BIND)(sym->st_info) == STB_GLOBAL) if (ELFW(ST_BIND)(sym->st_info) == STB_GLOBAL)
@ -465,9 +799,9 @@ static void convert_symbols(TCCState *s1, struct macho *mo)
convert_symbol(s1, mo, pn); convert_symbol(s1, mo, pn);
} }
static int machosymcmp(const void *_a, const void *_b) static int machosymcmp(const void *_a, const void *_b, void *arg)
{ {
TCCState *s1 = tcc_state; TCCState *s1 = arg;
int ea = ((struct nlist_64 *)_a)->n_value; int ea = ((struct nlist_64 *)_a)->n_value;
int eb = ((struct nlist_64 *)_b)->n_value; int eb = ((struct nlist_64 *)_b)->n_value;
ElfSym *sa = (ElfSym *)symtab_section->data + ea; ElfSym *sa = (ElfSym *)symtab_section->data + ea;
@ -492,6 +826,37 @@ static int machosymcmp(const void *_a, const void *_b)
return ea - eb; return ea - eb;
} }
/* cannot use qsort because code has to be reentrant */
static void tcc_qsort (void *base, size_t nel, size_t width,
int (*comp)(const void *, const void *, void *), void *arg)
{
size_t wnel, gap, wgap, i, j, k;
char *a, *b, tmp;
wnel = width * nel;
for (gap = 0; ++gap < nel;)
gap *= 3;
while ( gap /= 3 ) {
wgap = width * gap;
for (i = wgap; i < wnel; i += width) {
for (j = i - wgap; ;j -= wgap) {
a = j + (char *)base;
b = a + wgap;
if ( (*comp)(a, b, arg) <= 0 )
break;
k = width;
do {
tmp = *a;
*a++ = *b;
*b++ = tmp;
} while ( --k );
if (j < wgap)
break;
}
}
}
}
static void create_symtab(TCCState *s1, struct macho *mo) static void create_symtab(TCCState *s1, struct macho *mo)
{ {
int sym_index, sym_end; int sym_index, sym_end;
@ -500,9 +865,30 @@ static void create_symtab(TCCState *s1, struct macho *mo)
/* Stub creation belongs to check_relocs, but we need to create /* Stub creation belongs to check_relocs, but we need to create
the symbol now, so its included in the sorting. */ the symbol now, so its included in the sorting. */
mo->stubs = new_section(s1, "__stubs", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR); mo->stubs = new_section(s1, "__stubs", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR);
mo->stub_helper = new_section(s1, "__stub_helper", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR);
s1->got = new_section(s1, ".got", SHT_PROGBITS, SHF_ALLOC | SHF_WRITE);
mo->la_symbol_ptr = new_section(s1, "__la_symbol_ptr", SHT_PROGBITS, SHF_ALLOC | SHF_WRITE);
mo->stubsym = put_elf_sym(s1->symtab, 0, 0, mo->stubsym = put_elf_sym(s1->symtab, 0, 0,
ELFW(ST_INFO)(STB_LOCAL, STT_SECTION), 0, ELFW(ST_INFO)(STB_LOCAL, STT_SECTION), 0,
mo->stubs->sh_num, ".__stubs"); mo->stubs->sh_num, ".__stubs");
mo->helpsym = put_elf_sym(s1->symtab, 0, 0,
ELFW(ST_INFO)(STB_LOCAL, STT_SECTION), 0,
mo->stub_helper->sh_num, ".__stub_helper");
mo->lasym = put_elf_sym(s1->symtab, 0, 0,
ELFW(ST_INFO)(STB_LOCAL, STT_SECTION), 0,
mo->la_symbol_ptr->sh_num, ".__la_symbol_ptr");
section_ptr_add(data_section, -data_section->data_offset & (PTR_SIZE - 1));
mo->dyld_private = put_elf_sym(s1->symtab, data_section->data_offset, PTR_SIZE,
ELFW(ST_INFO)(STB_LOCAL, STT_OBJECT), 0,
data_section->sh_num, ".__dyld_private");
section_ptr_add(data_section, PTR_SIZE);
mo->dyld_stub_binder = put_elf_sym(s1->symtab, 0, 0,
ELFW(ST_INFO)(STB_GLOBAL, STT_OBJECT), 0,
SHN_UNDEF, "dyld_stub_binder");
mo->rebase = new_section(s1, "REBASE", SHT_LINKEDIT, SHF_ALLOC | SHF_WRITE);
mo->binding = new_section(s1, "BINDING", SHT_LINKEDIT, SHF_ALLOC | SHF_WRITE);
mo->lazy_binding = new_section(s1, "LAZY_BINDING", SHT_LINKEDIT, SHF_ALLOC | SHF_WRITE);
mo->exports = new_section(s1, "EXPORT", SHT_LINKEDIT, SHF_ALLOC | SHF_WRITE);
mo->symtab = new_section(s1, "LESYMTAB", SHT_LINKEDIT, SHF_ALLOC | SHF_WRITE); mo->symtab = new_section(s1, "LESYMTAB", SHT_LINKEDIT, SHF_ALLOC | SHF_WRITE);
mo->strtab = new_section(s1, "LESTRTAB", SHT_LINKEDIT, SHF_ALLOC | SHF_WRITE); mo->strtab = new_section(s1, "LESTRTAB", SHT_LINKEDIT, SHF_ALLOC | SHF_WRITE);
@ -515,9 +901,7 @@ static void create_symtab(TCCState *s1, struct macho *mo)
pn[sym_index - 1].n_strx = put_elf_str(mo->strtab, name); pn[sym_index - 1].n_strx = put_elf_str(mo->strtab, name);
pn[sym_index - 1].n_value = sym_index; pn[sym_index - 1].n_value = sym_index;
} }
tcc_enter_state(s1); /* qsort needs global state */ tcc_qsort(pn, sym_end - 1, sizeof(*pn), machosymcmp, s1);
qsort(pn, sym_end - 1, sizeof(*pn), machosymcmp);
tcc_exit_state(s1);
mo->e2msym = tcc_malloc(sym_end * sizeof(*mo->e2msym)); mo->e2msym = tcc_malloc(sym_end * sizeof(*mo->e2msym));
mo->e2msym[0] = -1; mo->e2msym[0] = -1;
for (sym_index = 1; sym_index < sym_end; ++sym_index) { for (sym_index = 1; sym_index < sym_end; ++sym_index) {
@ -530,22 +914,121 @@ const struct {
uint32_t flags; uint32_t flags;
const char *name; const char *name;
} skinfo[sk_last] = { } skinfo[sk_last] = {
/*[sk_unknown] =*/ { 0 }, /*[sk_unknown] =*/ { 0 },
/*[sk_discard] =*/ { 0 }, /*[sk_discard] =*/ { 0 },
/*[sk_text] =*/ { 1, S_REGULAR | S_ATTR_PURE_INSTRUCTIONS /*[sk_text] =*/ { 1, S_REGULAR | S_ATTR_PURE_INSTRUCTIONS
| S_ATTR_SOME_INSTRUCTIONS, "__text" }, | S_ATTR_SOME_INSTRUCTIONS, "__text" },
/*[sk_stubs] =*/ { 0 }, /*[sk_stubs] =*/ { 1, S_REGULAR | S_ATTR_PURE_INSTRUCTIONS | S_SYMBOL_STUBS
/*[sk_ro_data] =*/ { 1, S_REGULAR, "__rodata" }, | S_ATTR_SOME_INSTRUCTIONS , "__stubs" },
/*[sk_uw_info] =*/ { 0 }, /*[sk_stub_helper] =*/ { 1, S_REGULAR | S_ATTR_PURE_INSTRUCTIONS
/*[sk_nl_ptr] =*/ { 2, S_NON_LAZY_SYMBOL_POINTERS, "__got" }, | S_ATTR_SOME_INSTRUCTIONS , "__stub_helper" },
/*[sk_la_ptr] =*/ { 0 }, /*[sk_ro_data] =*/ { 1, S_REGULAR, "__rodata" },
/*[sk_init] =*/ { 2, S_MOD_INIT_FUNC_POINTERS, "__mod_init_func" }, /*[sk_uw_info] =*/ { 0 },
/*[sk_fini] =*/ { 2, S_MOD_TERM_FUNC_POINTERS, "__mod_term_func" }, /*[sk_nl_ptr] =*/ { 2, S_NON_LAZY_SYMBOL_POINTERS, "__got" },
/*[sk_rw_data] =*/ { 2, S_REGULAR, "__data" }, /*[sk_la_ptr] =*/ { 2, S_LAZY_SYMBOL_POINTERS, "__la_symbol_ptr" },
/*[sk_bss] =*/ { 2, S_ZEROFILL, "__bss" }, /*[sk_init] =*/ { 2, S_MOD_INIT_FUNC_POINTERS, "__mod_init_func" },
/*[sk_linkedit] =*/ { 3, S_REGULAR, NULL }, /*[sk_fini] =*/ { 2, S_MOD_TERM_FUNC_POINTERS, "__mod_term_func" },
/*[sk_rw_data] =*/ { 2, S_REGULAR, "__data" },
/*[sk_stab] =*/ { 2, S_REGULAR, "__stab" },
/*[sk_stab_str] =*/ { 2, S_REGULAR, "__stab_str" },
/*[sk_debug_info] =*/ { 2, S_REGULAR, "__debug_info" },
/*[sk_debug_abbrev] =*/ { 2, S_REGULAR, "__debug_abbrev" },
/*[sk_debug_line] =*/ { 2, S_REGULAR, "__debug_line" },
/*[sk_debug_aranges] =*/ { 2, S_REGULAR, "__debug_aranges" },
/*[sk_debug_str] =*/ { 2, S_REGULAR, "__debug_str" },
/*[sk_debug_line_str] =*/ { 2, S_REGULAR, "__debug_line_str" },
/*[sk_bss] =*/ { 2, S_ZEROFILL, "__bss" },
/*[sk_linkedit] =*/ { 3, S_REGULAR, NULL },
}; };
static void set_segment_and_offset(struct macho *mo, addr_t addr,
uint8_t *ptr, int opcode,
Section *sec, addr_t offset)
{
int i;
struct segment_command_64 *seg = NULL;
for (i = 1; i < mo->nseg - 1; i++) {
seg = get_segment(mo, i);
if (addr >= seg->vmaddr && addr < (seg->vmaddr + seg->vmsize))
break;
}
*ptr = opcode | i;
write_uleb128(sec, offset - seg->vmaddr);
}
static void do_bind_rebase(TCCState *s1, struct macho *mo)
{
int i;
uint8_t *ptr;
ElfW(Sym) *sym;
char *name;
for (i = 0; i < mo->n_lazy_bind_rebase; i++) {
int sym_index = ELFW(R_SYM)(mo->lazy_bind_rebase[i].rel.r_info);
Section *s = s1->sections[mo->lazy_bind_rebase[i].section];
sym = &((ElfW(Sym) *)symtab_section->data)[sym_index];
name = (char *) symtab_section->link->data + sym->st_name;
if (mo->lazy_bind_rebase[i].bind) {
write32le(mo->stub_helper->data +
mo->lazy_bind_rebase[i].bind_offset,
mo->lazy_binding->data_offset);
ptr = section_ptr_add(mo->lazy_binding, 1);
set_segment_and_offset(mo, mo->la_symbol_ptr->sh_addr, ptr,
BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB,
mo->lazy_binding,
mo->la_symbol_ptr->sh_addr +
mo->lazy_bind_rebase[i].la_symbol_offset);
ptr = section_ptr_add(mo->lazy_binding, 5 + strlen(name));
*ptr++ = BIND_OPCODE_SET_DYLIB_SPECIAL_IMM |
(BIND_SPECIAL_DYLIB_FLAT_LOOKUP & 0xf);
*ptr++ = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | 0;
strcpy(ptr, name);
ptr += strlen(name) + 1;
*ptr++ = BIND_OPCODE_DO_BIND;
*ptr = BIND_OPCODE_DONE;
}
else {
ptr = section_ptr_add(mo->rebase, 2);
*ptr++ = REBASE_OPCODE_SET_TYPE_IMM | REBASE_TYPE_POINTER;
set_segment_and_offset(mo, s->sh_addr, ptr,
REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB,
mo->rebase,
mo->lazy_bind_rebase[i].rel.r_offset +
s->sh_addr);
ptr = section_ptr_add(mo->rebase, 1);
*ptr = REBASE_OPCODE_DO_REBASE_IMM_TIMES | 1;
}
}
for (i = 0; i < mo->n_bind; i++) {
int sym_index = ELFW(R_SYM)(mo->bind[i].rel.r_info);
Section *s = s1->sections[mo->bind[i].section];
sym = &((ElfW(Sym) *)symtab_section->data)[sym_index];
name = (char *) symtab_section->link->data + sym->st_name;
ptr = section_ptr_add(mo->binding, 5 + strlen(name));
*ptr++ = BIND_OPCODE_SET_DYLIB_SPECIAL_IMM |
(BIND_SPECIAL_DYLIB_FLAT_LOOKUP & 0xf);
*ptr++ = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | 0;
strcpy(ptr, name);
ptr += strlen(name) + 1;
*ptr++ = BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER;
set_segment_and_offset(mo, s->sh_addr, ptr,
BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB,
mo->binding,
mo->bind[i].rel.r_offset + s->sh_addr);
ptr = section_ptr_add(mo->binding, 1);
*ptr++ = BIND_OPCODE_DO_BIND;
}
if (mo->rebase->data_offset) {
ptr = section_ptr_add(mo->rebase, 1);
*ptr = REBASE_OPCODE_DONE;
}
tcc_free(mo->lazy_bind_rebase);
tcc_free(mo->bind);
}
static void collect_sections(TCCState *s1, struct macho *mo) static void collect_sections(TCCState *s1, struct macho *mo)
{ {
int i, sk, numsec; int i, sk, numsec;
@ -584,6 +1067,7 @@ static void collect_sections(TCCState *s1, struct macho *mo)
str = (char*)dyldlc + dyldlc->name; str = (char*)dyldlc + dyldlc->name;
strcpy(str, "/usr/lib/dyld"); strcpy(str, "/usr/lib/dyld");
mo->dyldinfo = add_lc(mo, LC_DYLD_INFO_ONLY, sizeof(*mo->dyldinfo));
symlc = add_lc(mo, LC_SYMTAB, sizeof(*symlc)); symlc = add_lc(mo, LC_SYMTAB, sizeof(*symlc));
dysymlc = add_lc(mo, LC_DYSYMTAB, sizeof(*dysymlc)); dysymlc = add_lc(mo, LC_DYSYMTAB, sizeof(*dysymlc));
@ -610,12 +1094,37 @@ static void collect_sections(TCCState *s1, struct macho *mo)
case SHT_FINI_ARRAY: sk = sk_fini; break; case SHT_FINI_ARRAY: sk = sk_fini; break;
case SHT_NOBITS: sk = sk_bss; break; case SHT_NOBITS: sk = sk_bss; break;
case SHT_SYMTAB: sk = sk_discard; break; case SHT_SYMTAB: sk = sk_discard; break;
case SHT_STRTAB: sk = s == stabstr_section ? sk_ro_data : sk_discard; break; case SHT_STRTAB:
if (s == stabstr_section)
sk = sk_stab_str;
else
sk = sk_discard;
break;
case SHT_RELX: sk = sk_discard; break; case SHT_RELX: sk = sk_discard; break;
case SHT_LINKEDIT: sk = sk_linkedit; break; case SHT_LINKEDIT: sk = sk_linkedit; break;
case SHT_PROGBITS: case SHT_PROGBITS:
if (s == s1->got) if (s == mo->stubs)
sk = sk_stubs;
else if (s == mo->stub_helper)
sk = sk_stub_helper;
else if (s == s1->got)
sk = sk_nl_ptr; sk = sk_nl_ptr;
else if (s == mo->la_symbol_ptr)
sk = sk_la_ptr;
else if (s == stab_section)
sk = sk_stab;
else if (s == dwarf_info_section)
sk = sk_debug_info;
else if (s == dwarf_abbrev_section)
sk = sk_debug_abbrev;
else if (s == dwarf_line_section)
sk = sk_debug_line;
else if (s == dwarf_aranges_section)
sk = sk_debug_aranges;
else if (s == dwarf_str_section)
sk = sk_debug_str;
else if (s == dwarf_line_str_section)
sk = sk_debug_line_str;
else if (flags & SHF_EXECINSTR) else if (flags & SHF_EXECINSTR)
sk = sk_text; sk = sk_text;
else if (flags & SHF_WRITE) else if (flags & SHF_WRITE)
@ -637,9 +1146,14 @@ static void collect_sections(TCCState *s1, struct macho *mo)
mo->elfsectomacho = tcc_mallocz(sizeof(*mo->elfsectomacho) * s1->nb_sections); mo->elfsectomacho = tcc_mallocz(sizeof(*mo->elfsectomacho) * s1->nb_sections);
for (sk = sk_unknown; sk < sk_last; sk++) { for (sk = sk_unknown; sk < sk_last; sk++) {
struct section_64 *sec = NULL; struct section_64 *sec = NULL;
#define SEG_PAGE_SIZE 16384
if (sk == sk_linkedit)
do_bind_rebase(s1, mo);
if (seg) { if (seg) {
seg->vmsize = curaddr - seg->vmaddr; seg->vmsize = (curaddr - seg->vmaddr + SEG_PAGE_SIZE - 1) & -SEG_PAGE_SIZE;
seg->filesize = fileofs - seg->fileoff; seg->filesize = (fileofs - seg->fileoff + SEG_PAGE_SIZE - 1) & -SEG_PAGE_SIZE;
curaddr = seg->vmaddr + seg->vmsize;
fileofs = seg->fileoff + seg->filesize;
} }
if (skinfo[sk].seg && mo->sk_to_sect[sk].s) { if (skinfo[sk].seg && mo->sk_to_sect[sk].s) {
uint64_t al = 0; uint64_t al = 0;
@ -652,6 +1166,16 @@ static void collect_sections(TCCState *s1, struct macho *mo)
mo->sk_to_sect[sk].machosect = si; mo->sk_to_sect[sk].machosect = si;
sec = get_section(seg, si); sec = get_section(seg, si);
sec->flags = skinfo[sk].flags; sec->flags = skinfo[sk].flags;
if (sk == sk_stubs)
#ifdef TCC_TARGET_X86_64
sec->reserved2 = 6;
#elif defined TCC_TARGET_ARM64
sec->reserved2 = 12;
#endif
if (sk == sk_nl_ptr)
sec->reserved1 = mo->nr_plt;
if (sk == sk_la_ptr)
sec->reserved1 = mo->nr_plt + mo->n_got;
} }
if (seg->vmaddr == -1) { if (seg->vmaddr == -1) {
curaddr = (curaddr + 4095) & -4096; curaddr = (curaddr + 4095) & -4096;
@ -737,6 +1261,23 @@ static void collect_sections(TCCState *s1, struct macho *mo)
dysymlc->nundefsym = symlc->nsyms - dysymlc->iundefsym; dysymlc->nundefsym = symlc->nsyms - dysymlc->iundefsym;
dysymlc->indirectsymoff = mo->indirsyms->sh_offset; dysymlc->indirectsymoff = mo->indirsyms->sh_offset;
dysymlc->nindirectsyms = mo->indirsyms->data_offset / sizeof(uint32_t); dysymlc->nindirectsyms = mo->indirsyms->data_offset / sizeof(uint32_t);
if (mo->rebase->data_offset) {
mo->dyldinfo->rebase_off = mo->rebase->sh_offset;
mo->dyldinfo->rebase_size = mo->rebase->data_offset;
}
if (mo->binding->data_offset) {
mo->dyldinfo->bind_off = mo->binding->sh_offset;
mo->dyldinfo->bind_size = mo->binding->data_offset;
}
if (mo->lazy_binding->data_offset) {
mo->dyldinfo->lazy_bind_off = mo->lazy_binding->sh_offset;
mo->dyldinfo->lazy_bind_size = mo->lazy_binding->data_offset;
}
if (mo->exports->data_offset) {
mo->dyldinfo->export_off = mo->exports->sh_offset;
mo->dyldinfo->export_size = mo->exports->data_offset;
}
} }
static void macho_write(TCCState *s1, struct macho *mo, FILE *fp) static void macho_write(TCCState *s1, struct macho *mo, FILE *fp)
@ -745,10 +1286,15 @@ static void macho_write(TCCState *s1, struct macho *mo, FILE *fp)
uint64_t fileofs = 0; uint64_t fileofs = 0;
Section *s; Section *s;
mo->mh.mh.magic = MH_MAGIC_64; mo->mh.mh.magic = MH_MAGIC_64;
mo->mh.mh.cputype = 0x1000007; // x86_64 #ifdef TCC_TARGET_X86_64
mo->mh.mh.cpusubtype = 0x80000003;// all | CPU_SUBTYPE_LIB64 mo->mh.mh.cputype = CPU_TYPE_X86_64;
mo->mh.mh.filetype = 2; // MH_EXECUTE mo->mh.mh.cpusubtype = CPU_SUBTYPE_LIB64 | CPU_SUBTYPE_X86_ALL;
mo->mh.mh.flags = 4; // DYLDLINK #elif defined TCC_TARGET_ARM64
mo->mh.mh.cputype = CPU_TYPE_ARM64;
mo->mh.mh.cpusubtype = CPU_SUBTYPE_ARM64_ALL;
#endif
mo->mh.mh.filetype = MH_EXECUTE;
mo->mh.mh.flags = MH_DYLDLINK | MH_PIE;
mo->mh.mh.ncmds = mo->nlc; mo->mh.mh.ncmds = mo->nlc;
mo->mh.mh.sizeofcmds = 0; mo->mh.mh.sizeofcmds = 0;
for (i = 0; i < mo->nlc; i++) for (i = 0; i < mo->nlc; i++)
@ -803,6 +1349,7 @@ ST_FUNC int macho_output_file(TCCState *s1, const char *filename)
printf("<- %s\n", filename); printf("<- %s\n", filename);
tcc_add_runtime(s1); tcc_add_runtime(s1);
tcc_macho_add_destructor(s1);
resolve_common_syms(s1); resolve_common_syms(s1);
create_symtab(s1, &mo); create_symtab(s1, &mo);
check_relocs(s1, &mo); check_relocs(s1, &mo);
@ -959,8 +1506,13 @@ ST_FUNC int macho_load_dll(TCCState * s1, int fd, const char* filename, int lev)
fh.nfat_arch * sizeof(*fa)); fh.nfat_arch * sizeof(*fa));
swap = fh.magic == FAT_CIGAM; swap = fh.magic == FAT_CIGAM;
for (i = 0; i < SWAP(fh.nfat_arch); i++) for (i = 0; i < SWAP(fh.nfat_arch); i++)
if (SWAP(fa[i].cputype) == 0x01000007 /* CPU_TYPE_X86_64 */ #ifdef TCC_TARGET_X86_64
&& SWAP(fa[i].cpusubtype) == 3) /* CPU_SUBTYPE_X86_ALL */ if (SWAP(fa[i].cputype) == CPU_TYPE_X86_64
&& SWAP(fa[i].cpusubtype) == CPU_SUBTYPE_X86_ALL)
#elif defined TCC_TARGET_ARM64
if (SWAP(fa[i].cputype) == CPU_TYPE_ARM64
&& SWAP(fa[i].cpusubtype) == CPU_SUBTYPE_ARM64_ALL)
#endif
break; break;
if (i == SWAP(fh.nfat_arch)) { if (i == SWAP(fh.nfat_arch)) {
tcc_free(fa); tcc_free(fa);

View File

@ -3648,7 +3648,7 @@ void test_asm_dead_code(void)
void test_asm_call(void) void test_asm_call(void)
{ {
#if defined __x86_64__ && !defined _WIN64 #if defined __x86_64__ && !defined _WIN64 && !defined(__APPLE__)
static char str[] = "PATH"; static char str[] = "PATH";
char *s; char *s;
/* This tests if a reference to an undefined symbol from an asm /* This tests if a reference to an undefined symbol from an asm
@ -3660,10 +3660,8 @@ void test_asm_call(void)
tested here). */ tested here). */
/* two pushes so stack remains aligned */ /* two pushes so stack remains aligned */
asm volatile ("push %%rdi; push %%rdi; mov %0, %%rdi;" asm volatile ("push %%rdi; push %%rdi; mov %0, %%rdi;"
#if 1 && !defined(__TINYC__) && (defined(__PIC__) || defined(__PIE__)) && !defined(__APPLE__) #if 1 && !defined(__TINYC__) && (defined(__PIC__) || defined(__PIE__))
"call getenv@plt;" "call getenv@plt;"
#elif defined(__APPLE__)
"call _getenv;"
#else #else
"call getenv;" "call getenv;"
#endif #endif
@ -3892,7 +3890,7 @@ void builtin_test(void)
//printf("bera: %p\n", __builtin_extract_return_addr((void*)43)); //printf("bera: %p\n", __builtin_extract_return_addr((void*)43));
} }
#ifdef _WIN32 #if defined _WIN32 || defined __APPLE__
void weak_test(void) {} void weak_test(void) {}
#else #else
extern int __attribute__((weak)) weak_f1(void); extern int __attribute__((weak)) weak_f1(void);

View File

@ -14,9 +14,11 @@
abort(); \ abort(); \
} while (0) } while (0)
#ifndef __APPLE__
#if defined __x86_64__ || defined __aarch64__ || defined __riscv #if defined __x86_64__ || defined __aarch64__ || defined __riscv
#define HAS_64BITS #define HAS_64BITS
#endif #endif
#endif
typedef struct { typedef struct {
atomic_flag flag; atomic_flag flag;