Fix fetch_and_add code

Change type from signed char to int.
Make assembly code work with tcc and gcc.
Author: herman ten brugge
Date:   2020-06-18 07:21:48 +02:00
Parent: b5faa45d90
Commit: b2d351e0ec
4 changed files with 53 additions and 29 deletions
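What the patch is after, in plain C, is an atomic add on an int flag. A minimal reference sketch of those semantics, written with the GCC/Clang __atomic builtin purely as an illustration: it is not part of the commit, and the hand-written assembly below is assumed to exist precisely because tcc does not provide this builtin.

    /* Illustration only (not from the patch): the semantics fetch_and_add
       is expected to provide, expressed with the GCC/Clang builtin. */
    #include <stdio.h>

    static int no_checking;                 /* int, as after this commit */

    static void fetch_and_add_ref(int *variable, int value)
    {
        __atomic_fetch_add(variable, value, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        fetch_and_add_ref(&no_checking, 1);     /* e.g. enter a no-check region */
        fetch_and_add_ref(&no_checking, -1);    /* and leave it again */
        printf("no_checking = %d\n", no_checking);  /* prints 0 */
        return 0;
    }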

--- a/lib/bcheck.c
+++ b/lib/bcheck.c

@@ -273,8 +273,8 @@ static unsigned char print_calls;
 static unsigned char print_heap;
 static unsigned char print_statistic;
 static unsigned char no_strdup;
-static signed char never_fatal;
-static signed char no_checking = 1;
+static int never_fatal;
+static int no_checking = 1;
 static char exec[100];
 #if BOUND_STATISTIC
@@ -325,22 +325,22 @@ static unsigned long long bound_splay_delete;
 #endif
 /* currently only i386/x86_64 supported. Change for other platforms */
-static void fetch_and_add(signed char* variable, signed char value)
+static void fetch_and_add(int* variable, int value)
 {
 #if defined __i386__ || defined __x86_64__
-    __asm__ volatile("lock; addb %0, %1"
+    __asm__ volatile("lock; addl %0, %1"
                      : "+r" (value), "+m" (*variable) // input+output
                      : // No input-only
                      : "memory"
                      );
 #elif defined __arm__
-    extern fetch_and_add_arm(signed char* variable, signed char value);
+    extern void fetch_and_add_arm(int* variable, int value);
     fetch_and_add_arm(variable, value);
 #elif defined __aarch64__
-    extern fetch_and_add_arm64(signed char* variable, signed char value);
+    extern void fetch_and_add_arm64(int* variable, int value);
     fetch_and_add_arm64(variable, value);
 #elif defined __riscv
-    extern fetch_and_add_riscv64(signed char* variable, signed char value);
+    extern void fetch_and_add_riscv64(int* variable, int value);
     fetch_and_add_riscv64(variable, value);
 #else
     *variable += value;

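The i386/x86_64 branch is ordinary constraint-based inline asm, which both gcc and tcc accept, so it can be exercised in isolation. A hedged standalone sketch (the test harness and names are illustrative, not from the commit):

    /* Hypothetical standalone check: same "lock; addl" form as in
       fetch_and_add() above, plus the non-atomic fallback. */
    static void fetch_and_add(int *variable, int value)
    {
    #if defined __i386__ || defined __x86_64__
        __asm__ volatile("lock; addl %0, %1"
                         : "+r" (value), "+m" (*variable)
                         :
                         : "memory");
    #else
        *variable += value;                 /* fallback, as in bcheck.c */
    #endif
    }

    int main(void)
    {
        int counter = 0;
        fetch_and_add(&counter, 5);
        fetch_and_add(&counter, -2);
        return counter == 3 ? 0 : 1;        /* exit status 0 on success */
    }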
--- a/lib/fetch_and_add_arm.S
+++ b/lib/fetch_and_add_arm.S

@@ -3,14 +3,26 @@
 .global fetch_and_add_arm
 .type fetch_and_add_arm, %function
 fetch_and_add_arm:
-.int 0xee070fba # mcr 15, 0, r0, cr7, cr10, {5}
-.int 0xe1903f9f # ldrex r3, [r0]
-.int 0xe2833001 # add r3, r3, #1
-.int 0xe1802f93 # strex r2, r3, [r0]
-.int 0xe3520000 # cmp r2, #0
-.int 0x1afffffa # bne 4 <fetch_and_add_arm+0x4>
-.int 0xee070fba # mcr 15, 0, r0, cr7, cr10, {5}
-.int 0xe1a00003 # mov r0, r3
-.int 0xe12fff1e # bx lr
+#ifdef __TINYC__
+.int 0xf57ff05b
+.int 0xe1903f9f
+.int 0xe0833001
+.int 0xe1802f93
+.int 0xe3520000
+.int 0x1afffffa
+.int 0xf57ff05b
+.int 0xe12fff1e
+#else
+.arch armv7-a
+dmb ish
+.L0:
+ldrex r3, [r0]
+add r3, r3, r1
+strex r2, r3, [r0]
+cmp r2, #0
+bne .L0
+dmb ish
+bx lr
+#endif
 .size fetch_and_add_arm, .-fetch_and_add_arm

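The #else branch above is the usual load-exclusive/store-exclusive retry loop: load the current value, try to store the sum, and branch back if the exclusive store failed, with dmb ish barriers on both sides. A rough C-level analogue of that retry structure, written with the GCC/Clang builtins purely as an illustration (not code from the commit):

    /* Sketch of the ldrex/strex pattern as a compare-exchange retry loop. */
    static void fetch_and_add_sketch(int *variable, int value)
    {
        int old_val, new_val;
        do {
            old_val = __atomic_load_n(variable, __ATOMIC_RELAXED);   /* ldrex */
            new_val = old_val + value;                               /* add   */
        } while (!__atomic_compare_exchange_n(variable, &old_val, new_val,
                                              0 /* strong */,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_RELAXED));    /* strex + bne */
    }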
--- a/lib/fetch_and_add_arm64.S
+++ b/lib/fetch_and_add_arm64.S

@@ -3,12 +3,20 @@
 .global fetch_and_add_arm64
 .type fetch_and_add_arm64, %function
 fetch_and_add_arm64:
-.int 0x885f7c01 # ldxr w1, [x0]
-.int 0x11000421 # add w1, w1, #0x1
-.int 0x8802fc01 # stlxr w2, w1, [x0]
-.int 0x35ffffa2 # cbnz w2, 0 <fetch_and_add_arm64>
-.int 0xd5033bbf # dmb ish
-.int 0x2a0103e0 # mov w0, w1
-.int 0xd65f03c0 # ret
+#ifdef __TINYC__
+.int 0x885f7c02
+.int 0x0b010042
+.int 0x8803fc02
+.int 0x35ffffa3
+.int 0xd5033bbf
+.int 0xd65f03c0
+#else
+ldxr w2, [x0]
+add w2, w2, w1
+stlxr w3, w2, [x0]
+cbnz w3, fetch_and_add_arm64
+dmb ish
+ret
+#endif
 .size fetch_and_add_arm64, .-fetch_and_add_arm64

--- a/lib/fetch_and_add_riscv64.S
+++ b/lib/fetch_and_add_riscv64.S

@@ -3,10 +3,14 @@
 .global fetch_and_add_riscv64
 .type fetch_and_add_riscv64, %function
 fetch_and_add_riscv64:
-.short 0x4705 # li a4,1
-.int 0x0f50000f # fence iorw,ow
-.int 0x04e527af # amoadd.w.aq a5,a4,(a0)
-.int 0x0017851b # addiw a0,a5,1
-.short 0x8082 # ret
+#ifdef __TINYC__
+.int 0x0f50000f
+.int 0x04b5202f
+.short 0x8082
+#else
+fence iorw,ow
+amoadd.w.aq zero,a1,0(a0)
+ret
+#endif
 .size fetch_and_add_riscv64, .-fetch_and_add_riscv64
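Since every branch of fetch_and_add is meant to behave like one atomic add, a simple way to sanity-check any of them is a multithreaded counter test. This is a sketch only: it assumes the implementation under test is made visible to the test file (in bcheck.c the function is static), and it needs -pthread to build.

    /* Hypothetical smoke test (not from the commit): hammer the flag from
       several threads; with a non-atomic add the final value would be
       likely to drift away from 0. */
    #include <pthread.h>
    #include <stdio.h>

    extern void fetch_and_add(int *variable, int value);  /* assumed visible for the test */

    static int flag;

    static void *worker(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 100000; i++) {
            fetch_and_add(&flag, 1);
            fetch_and_add(&flag, -1);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t[4];
        for (int i = 0; i < 4; i++)
            pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < 4; i++)
            pthread_join(t[i], NULL);
        printf("flag = %d\n", flag);        /* expect 0 if the add is atomic */
        return flag != 0;
    }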