2009-05-06 02:17:11 +08:00
|
|
|
/*
|
|
|
|
* TCC - Tiny C Compiler
|
2022-05-09 23:02:09 +08:00
|
|
|
*
|
2009-05-06 02:17:11 +08:00
|
|
|
* Copyright (c) 2001-2004 Fabrice Bellard
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
|
|
*/
|
|
|
|
|
2019-12-11 07:37:18 +08:00
|
|
|
#define USING_GLOBALS
|
2009-12-20 08:53:49 +08:00
|
|
|
#include "tcc.h"
|
2019-12-11 07:37:18 +08:00
|
|
|
|
2009-12-20 08:53:49 +08:00
|
|
|
/********************************************************/
|
|
|
|
/* global variables */
|
|
|
|
|
|
|
|
/* loc : local variable index
|
|
|
|
ind : output code index
|
|
|
|
rsym: return symbol
|
|
|
|
anon_sym: anonymous symbol index
|
|
|
|
*/
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_DATA int rsym, anon_sym, ind, loc;
|
|
|
|
|
|
|
|
ST_DATA Sym *global_stack;
|
|
|
|
ST_DATA Sym *local_stack;
|
|
|
|
ST_DATA Sym *define_stack;
|
|
|
|
ST_DATA Sym *global_label_stack;
|
|
|
|
ST_DATA Sym *local_label_stack;
|
|
|
|
|
|
|
|
static Sym *sym_free_first;
|
|
|
|
static void **sym_pools;
|
|
|
|
static int nb_sym_pools;
|
|
|
|
|
|
|
|
static Sym *all_cleanups, *pending_gotos;
|
|
|
|
static int local_scope;
|
|
|
|
static int in_sizeof;
|
2022-11-29 14:56:26 +08:00
|
|
|
static int constant_p;
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_DATA char debug_modes;
|
|
|
|
|
|
|
|
ST_DATA SValue *vtop;
|
|
|
|
static SValue _vstack[1 + VSTACK_SIZE];
|
|
|
|
#define vstack (_vstack + 1)
|
|
|
|
|
|
|
|
ST_DATA int const_wanted; /* true if constant wanted */
|
|
|
|
ST_DATA int nocode_wanted; /* no code generation wanted */
|
2020-01-16 06:32:40 +08:00
|
|
|
#define unevalmask 0xffff /* unevaluated subexpression */
|
2021-10-22 13:39:54 +08:00
|
|
|
#define NODATA_WANTED (nocode_wanted > 0) /* no static data output wanted either */
|
2022-08-20 18:58:56 +08:00
|
|
|
#define DATA_ONLY_WANTED 0x80000000 /* ON outside of functions and for static initializers */
|
2021-10-22 13:39:54 +08:00
|
|
|
#define CODE_OFF() (nocode_wanted |= 0x20000000)
|
|
|
|
#define CODE_ON() (nocode_wanted &= ~0x20000000)
|
2019-04-29 19:53:07 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_DATA int global_expr; /* true if compound literals must be allocated globally (used during initializers parsing */
|
|
|
|
ST_DATA CType func_vt; /* current function return type (used by return instruction) */
|
|
|
|
ST_DATA int func_var; /* true if current function is variadic (used by return instruction) */
|
|
|
|
ST_DATA int func_vc;
|
2022-05-09 23:02:09 +08:00
|
|
|
ST_DATA int func_ind;
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_DATA const char *funcname;
|
|
|
|
ST_DATA CType int_type, func_old_type, char_type, char_pointer_type;
|
|
|
|
static CString initstr;
|
|
|
|
|
2019-12-17 01:51:28 +08:00
|
|
|
#if PTR_SIZE == 4
|
|
|
|
#define VT_SIZE_T (VT_INT | VT_UNSIGNED)
|
|
|
|
#define VT_PTRDIFF_T VT_INT
|
|
|
|
#elif LONG_SIZE == 4
|
|
|
|
#define VT_SIZE_T (VT_LLONG | VT_UNSIGNED)
|
|
|
|
#define VT_PTRDIFF_T VT_LLONG
|
|
|
|
#else
|
|
|
|
#define VT_SIZE_T (VT_LONG | VT_LLONG | VT_UNSIGNED)
|
|
|
|
#define VT_PTRDIFF_T (VT_LONG | VT_LLONG)
|
|
|
|
#endif
|
2009-12-20 08:53:49 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* state of the innermost enclosing 'switch' statement; nested switches
   are chained through 'prev' */
static struct switch_t {
    struct case_t {
        int64_t v1, v2; /* case value; v1..v2 for GNU case ranges */
        int sym;        /* jump symbol for this case's code */
    } **p; int n; /* list of case ranges */
    int def_sym; /* default symbol */
    int nocode_wanted; /* NOTE(review): presumably saved nocode_wanted at switch entry -- confirm at use sites */
    int *bsym;             /* 'break' jump symbol chain */
    struct scope *scope;   /* scope the switch was opened in */
    struct switch_t *prev; /* enclosing switch, if any */
    SValue sv;             /* saved switch expression value */
} *cur_switch; /* current switch */
|
|
|
|
|
|
|
|
#define MAX_TEMP_LOCAL_VARIABLE_NUMBER 8
/*list of temporary local variables on the stack in current function. */
static struct temp_local_variable {
    int location; //offset on stack. Svalue.c.i
    short size;   /* size in bytes of the temporary slot */
    short align;  /* required alignment of the slot */
} arr_temp_local_vars[MAX_TEMP_LOCAL_VARIABLE_NUMBER];
static int nb_temp_local_vars; /* number of entries currently in use */
|
|
|
|
|
|
|
|
/* lexical scope state, one node per block; chained via 'prev' up to
   root_scope (function level) */
static struct scope {
    struct scope *prev;                    /* enclosing scope */
    struct { int loc, locorig, num; } vla; /* VLA stack-pointer bookkeeping for this scope */
    struct { Sym *s; int n; } cl;          /* NOTE(review): looks like pending cleanup handlers -- confirm */
    int *bsym, *csym;                      /* 'break' / 'continue' jump symbol chains */
    Sym *lstk, *llstk;                     /* saved local symbol / local label stacks */
} *cur_scope, *loop_scope, *root_scope;
|
|
|
|
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
/* parameters threaded through the initializer parsing/output routines */
typedef struct {
    Section *sec;        /* target data section; NOTE(review): presumably NULL for automatic (stack) initialization -- confirm */
    int local_offset;
    Sym *flex_array_ref; /* flexible array member being initialized, if any */
} init_params;
|
|
|
|
|
2021-08-02 02:04:46 +08:00
|
|
|
#if 1
|
|
|
|
#define precedence_parser
|
2021-10-22 13:39:54 +08:00
|
|
|
static void init_prec(void);
|
2021-08-02 02:04:46 +08:00
|
|
|
#endif
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void gen_cast(CType *type);
|
|
|
|
static void gen_cast_s(int t);
|
2009-12-20 08:53:49 +08:00
|
|
|
static inline CType *pointed_type(CType *type);
|
|
|
|
static int is_compatible_types(CType *type1, CType *type2);
|
2022-07-04 21:24:46 +08:00
|
|
|
static int parse_btype(CType *type, AttributeDef *ad, int ignore_label);
|
2021-10-22 13:39:54 +08:00
|
|
|
static CType *type_decl(CType *type, AttributeDef *ad, int *v, int td);
|
|
|
|
static void parse_expr_type(CType *type);
|
|
|
|
static void init_putv(init_params *p, CType *type, unsigned long c);
|
|
|
|
static void decl_initializer(init_params *p, CType *type, unsigned long c, int flags);
|
|
|
|
static void block(int is_expr);
|
|
|
|
static void decl_initializer_alloc(CType *type, AttributeDef *ad, int r, int has_init, int v, int scope);
|
2022-10-15 02:10:38 +08:00
|
|
|
static int decl(int l);
|
2021-10-22 13:39:54 +08:00
|
|
|
static void expr_eq(void);
|
2021-12-08 17:49:28 +08:00
|
|
|
static void vpush_type_size(CType *type, int *a);
|
2017-07-09 18:38:25 +08:00
|
|
|
static int is_compatible_unqualified_types(CType *type1, CType *type2);
|
2021-10-22 13:39:54 +08:00
|
|
|
static inline int64_t expr_const64(void);
|
|
|
|
static void vpush64(int ty, unsigned long long v);
|
|
|
|
static void vpush(CType *type);
|
|
|
|
static int gvtst(int inv, int t);
|
|
|
|
static void gen_inline_functions(TCCState *s);
|
|
|
|
static void free_inline_functions(TCCState *s);
|
|
|
|
static void skip_or_save_block(TokenString **str);
|
|
|
|
static void gv_dup(void);
|
|
|
|
static int get_temp_local_var(int size,int align);
|
2019-01-10 03:32:23 +08:00
|
|
|
static void clear_temp_local_var_list();
|
2021-10-22 13:39:54 +08:00
|
|
|
static void cast_error(CType *st, CType *dt);
|
2009-12-20 08:53:49 +08:00
|
|
|
|
2022-08-20 18:58:56 +08:00
|
|
|
/* ------------------------------------------------------------------------- */
|
|
|
|
/* Automagical code suppression */
|
|
|
|
|
|
|
|
/* Clear 'nocode_wanted' at forward label if it was used */
|
|
|
|
ST_FUNC void gsym(int t)
|
|
|
|
{
|
|
|
|
if (t) {
|
|
|
|
gsym_addr(t, ind);
|
|
|
|
CODE_ON();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Clear 'nocode_wanted' if current pc is a label */
|
|
|
|
static int gind()
|
|
|
|
{
|
|
|
|
int t = ind;
|
|
|
|
CODE_ON();
|
|
|
|
if (debug_modes)
|
|
|
|
tcc_tcov_block_begin(tcc_state);
|
|
|
|
return t;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set 'nocode_wanted' after unconditional (backwards) jump */
/* Emit a backward jump to known address 't', then suppress code
   generation: anything following an unconditional jump is dead until
   a label is seen (see CODE_ON in gsym/gind). */
static void gjmp_addr_acs(int t)
{
    gjmp_addr(t);
    CODE_OFF();
}
|
|
|
|
|
|
|
|
/* Set 'nocode_wanted' after unconditional (forwards) jump */
/* Emit a forward jump chained onto 't'; returns the new chain head to
   be resolved later with gsym().  Code after the jump is dead, so
   turn code generation off. */
static int gjmp_acs(int t)
{
    t = gjmp(t);
    CODE_OFF();
    return t;
}
|
|
|
|
|
|
|
|
/* These are #undef'd at the end of this file */
|
|
|
|
#define gjmp_addr gjmp_addr_acs
|
|
|
|
#define gjmp gjmp_acs
|
2022-05-09 23:02:09 +08:00
|
|
|
/* ------------------------------------------------------------------------- */
|
|
|
|
|
2009-12-20 08:53:49 +08:00
|
|
|
ST_INLN int is_float(int t)
|
|
|
|
{
|
2019-12-17 01:44:35 +08:00
|
|
|
int bt = t & VT_BTYPE;
|
|
|
|
return bt == VT_LDOUBLE
|
|
|
|
|| bt == VT_DOUBLE
|
|
|
|
|| bt == VT_FLOAT
|
|
|
|
|| bt == VT_QFLOAT;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int is_integer_btype(int bt)
|
|
|
|
{
|
|
|
|
return bt == VT_BYTE
|
2019-12-17 01:51:28 +08:00
|
|
|
|| bt == VT_BOOL
|
2019-12-17 01:44:35 +08:00
|
|
|
|| bt == VT_SHORT
|
|
|
|
|| bt == VT_INT
|
|
|
|
|| bt == VT_LLONG;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int btype_size(int bt)
|
|
|
|
{
|
|
|
|
return bt == VT_BYTE || bt == VT_BOOL ? 1 :
|
|
|
|
bt == VT_SHORT ? 2 :
|
|
|
|
bt == VT_INT ? 4 :
|
|
|
|
bt == VT_LLONG ? 8 :
|
|
|
|
bt == VT_PTR ? PTR_SIZE : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* returns function return register from type */
static int R_RET(int t)
{
    /* all integer/pointer types come back in the integer return reg */
    if (!is_float(t))
        return REG_IRET;
#ifdef TCC_TARGET_X86_64
    /* x86-64: long double is returned on the x87 stack top */
    if ((t & VT_BTYPE) == VT_LDOUBLE)
        return TREG_ST0;
#elif defined TCC_TARGET_RISCV64
    /* riscv64: long double is returned in integer registers */
    if ((t & VT_BTYPE) == VT_LDOUBLE)
        return REG_IRET;
#endif
    return REG_FRET;
}
|
|
|
|
|
|
|
|
/* returns 2nd function return register, if any; VT_CONST is used as
   the "no second register" sentinel */
static int R2_RET(int t)
{
    t &= VT_BTYPE;
#if PTR_SIZE == 4
    /* 32-bit targets: 64-bit integers use a register pair */
    if (t == VT_LLONG)
        return REG_IRE2;
#elif defined TCC_TARGET_X86_64
    if (t == VT_QLONG)
        return REG_IRE2;
    if (t == VT_QFLOAT)
        return REG_FRE2;
#elif defined TCC_TARGET_RISCV64
    if (t == VT_LDOUBLE)
        return REG_IRE2;
#endif
    return VT_CONST;
}

/* returns true for two-word types */
#define USING_TWO_WORDS(t) (R2_RET(t) != VT_CONST)
|
|
|
|
|
|
|
|
/* put function return registers to stack value */
/* Mark stack value 'sv' as living in the return register(s) for
   type 't' (r2 is VT_CONST when a single register suffices). */
static void PUT_R_RET(SValue *sv, int t)
{
    sv->r = R_RET(t), sv->r2 = R2_RET(t);
}
|
|
|
|
|
|
|
|
/* returns function return register class for type t */
/* Looks up the class bits of the concrete return register and strips
   the generic RC_FLOAT/RC_INT bits, leaving only the specific-register
   class (e.g. RC_EAX-style bits). */
static int RC_RET(int t)
{
    return reg_classes[R_RET(t)] & ~(RC_FLOAT | RC_INT);
}
|
|
|
|
|
|
|
|
/* returns generic register class for type t */
static int RC_TYPE(int t)
{
    if (!is_float(t))
        return RC_INT;
#ifdef TCC_TARGET_X86_64
    /* x86-64: long double lives on the x87 stack, QFLOAT in the
       float return register pair */
    if ((t & VT_BTYPE) == VT_LDOUBLE)
        return RC_ST0;
    if ((t & VT_BTYPE) == VT_QFLOAT)
        return RC_FRET;
#elif defined TCC_TARGET_RISCV64
    /* riscv64: long double is handled in integer registers */
    if ((t & VT_BTYPE) == VT_LDOUBLE)
        return RC_INT;
#endif
    return RC_FLOAT;
}
|
|
|
|
|
|
|
|
/* returns 2nd register class corresponding to t and rc */
/* 0 means "no second register needed" (single-word type). */
static int RC2_TYPE(int t, int rc)
{
    if (!USING_TWO_WORDS(t))
        return 0;
#ifdef RC_IRE2
    /* targets with a dedicated second-return-register class */
    if (rc == RC_IRET)
        return RC_IRE2;
#endif
#ifdef RC_FRE2
    if (rc == RC_FRET)
        return RC_FRE2;
#endif
    if (rc & RC_FLOAT)
        return RC_FLOAT;
    return RC_INT;
}
|
|
|
|
|
2011-08-01 07:10:36 +08:00
|
|
|
/* we use our own 'finite' function to avoid potential problems with
   non standard math libs */
/* XXX: endianness dependent */
ST_FUNC int ieee_finite(double d)
{
    int p[4];
    /* type-pun via memcpy to avoid strict-aliasing UB */
    memcpy(p, &d, sizeof(double));
    /* p[1] is assumed to hold the high 32 bits of the IEEE-754 double
       (little-endian hosts).  OR-ing with 0x800fffff sets every bit
       except the 11 exponent bits; adding 1 then carries out of bit 31
       exactly when the exponent field is all-ones (Inf/NaN), so the
       result bit 31 is 1 for finite values and 0 otherwise. */
    return ((unsigned)((p[1] | 0x800fffff) + 1)) >> 31;
}
|
|
|
|
|
various stuff
win32/Makefile ("for cygwin") removed
- On cygwin, the normal ./configure && make can be used with either
cygwin's "GCC for Win32 Toolchain"
./configure --cross-prefix=i686-w64-mingw32-
or with an existing tcc:
./configure --cc=<old-tccdir>/tcc.exe
tcctest.c:
- exclude test_high_clobbers() on _WIN64 (does not work)
tests2/95_bitfield.c:
- use 'signed char' for ARM (where default 'char' is unsigned)
tests:
- remove -I "expr" diff option to allow tests with
busybox-diff.
libtcc.c, tcc.c:
- removed -iwithprefix option. It is supposed to be
combined with -iprefix which we don't have either.
tccgen.c:
- fix assignments and return of 'void', as in
void f() {
void *p, *q;
*p = *q:
return *p;
}
This appears to be allowed but should do nothing.
tcc.h, libtcc.c, tccpp.c:
- Revert "Introduce VIP sysinclude paths which are always searched first"
This reverts commit 1d5e386b0a78393ac6b670c209a185849ec798a1.
The patch was giving tcc's system includes priority over -I which
is not how it should be.
tccelf.c:
- add DT_TEXTREL tag only if text relocations are actually
used (which is likely not the case on x86_64)
- prepare_dynamic_rel(): avoid relocation of unresolved
(weak) symbols
tccrun.c:
- for HAVE_SELINUX, use two mappings to the same (real) file.
(it was so once except the RX mapping wasn't used at all).
tccpe.c:
- fix relocation constant used for x86_64 (by Andrei E. Warentin)
- #ifndef _WIN32 do "chmod 755 ..." to get runnable exes on cygwin.
tccasm.c:
- keep forward asm labels static, otherwise they will endup
in dynsym eventually.
configure, Makefile:
- mingw32: respect ./configure options --bindir --docdir --libdir
- allow overriding tcc when building libtcc1.a and libtcc.def with
make XTCC=<tcc program to use>
- use $(wildcard ...) for install to allow installing just
a cross compiler for example
make cross-arm
make install
- use name <target>-libtcc1.a
build-tcc.bat:
- add options: -clean, -b bindir
2017-10-12 00:13:43 +08:00
|
|
|
/* compiling intel long double natively */
|
|
|
|
#if (defined __i386__ || defined __x86_64__) \
|
|
|
|
&& (defined TCC_TARGET_I386 || defined TCC_TARGET_X86_64)
|
|
|
|
# define TCC_IS_NATIVE_387
|
|
|
|
#endif
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* raise a compile error unless the value on top of the value stack
   is an lvalue (required e.g. for assignment and address-of) */
ST_FUNC void test_lvalue(void)
{
    if (!(vtop->r & VT_LVAL))
        expect("lvalue");
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void check_vstack(void)
|
tccpp: fix issues, add tests
* fix some macro expansion issues
* add some pp tests in tests/pp
* improved tcc -E output for better diff'ability
* remove -dD feature (quirky code, exotic feature,
didn't work well)
Based partially on ideas / researches from PipCet
Some issues remain with VA_ARGS macros (if used in a
rather tricky way).
Also, to keep it simple, the pp doesn't automtically
add any extra spaces to separate tokens which otherwise
would form wrong tokens if re-read from tcc -E output
(such as '+' '=') GCC does that, other compilers don't.
* cleanups
- #line 01 "file" / # 01 "file" processing
- #pragma comment(lib,"foo")
- tcc -E: forward some pragmas to output (pack, comment(lib))
- fix macro parameter list parsing mess from
a3fc54345949535524d01319e1ca6378b7c2c201
a715d7143d9d17da17e67fec6af1c01409a71a31
(some coffee might help, next time ;)
- introduce TOK_PPSTR - to have character constants as
written in the file (similar to TOK_PPNUM)
- allow '\' appear in macros
- new functions begin/end_macro to:
- fix switching macro levels during expansion
- allow unget_tok to unget more than one tok
- slight speedup by using bitflags in isidnum_table
Also:
- x86_64.c : fix decl after statements
- i386-gen,c : fix a vstack leak with VLA on windows
- configure/Makefile : build on windows (MSYS) was broken
- tcc_warning: fflush stderr to keep output order (win32)
2015-05-09 20:29:39 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
if (vtop != vstack - 1)
|
|
|
|
tcc_error("internal compiler error: vstack leak (%d)",
|
|
|
|
(int)(vtop - vstack + 1));
|
tccpp: fix issues, add tests
* fix some macro expansion issues
* add some pp tests in tests/pp
* improved tcc -E output for better diff'ability
* remove -dD feature (quirky code, exotic feature,
didn't work well)
Based partially on ideas / researches from PipCet
Some issues remain with VA_ARGS macros (if used in a
rather tricky way).
Also, to keep it simple, the pp doesn't automtically
add any extra spaces to separate tokens which otherwise
would form wrong tokens if re-read from tcc -E output
(such as '+' '=') GCC does that, other compilers don't.
* cleanups
- #line 01 "file" / # 01 "file" processing
- #pragma comment(lib,"foo")
- tcc -E: forward some pragmas to output (pack, comment(lib))
- fix macro parameter list parsing mess from
a3fc54345949535524d01319e1ca6378b7c2c201
a715d7143d9d17da17e67fec6af1c01409a71a31
(some coffee might help, next time ;)
- introduce TOK_PPSTR - to have character constants as
written in the file (similar to TOK_PPNUM)
- allow '\' appear in macros
- new functions begin/end_macro to:
- fix switching macro levels during expansion
- allow unget_tok to unget more than one tok
- slight speedup by using bitflags in isidnum_table
Also:
- x86_64.c : fix decl after statements
- i386-gen,c : fix a vstack leak with VLA on windows
- configure/Makefile : build on windows (MSYS) was broken
- tcc_warning: fflush stderr to keep output order (win32)
2015-05-09 20:29:39 +08:00
|
|
|
}
|
|
|
|
|
2016-10-14 01:21:43 +08:00
|
|
|
/* vstack debugging aid */
/* Dump 'b' value-stack entries starting at vtop[-a], labelled with
   'lbl'.  Compiled out by default; flip the #if to enable. */
#if 0
void pv (const char *lbl, int a, int b)
{
    int i;
    for (i = a; i < a + b; ++i) {
        SValue *p = &vtop[-i];
        printf("%s vtop[-%d] : type.t:%04x r:%04x r2:%04x c.i:%d\n",
               lbl, i, p->type.t, p->r, p->r2, (int)p->c.i);
    }
}
#endif
|
|
|
|
|
2017-02-21 01:58:08 +08:00
|
|
|
/* ------------------------------------------------------------------------- */
|
2019-12-14 19:31:03 +08:00
|
|
|
/* initialize vstack and types. This must be done also for tcc -E */
ST_FUNC void tccgen_init(TCCState *s1)
{
    /* empty value stack: vtop points one slot below vstack[0]
       (that guard slot exists in _vstack) */
    vtop = vstack - 1;
    memset(vtop, 0, sizeof *vtop);

    /* define some often used types */
    int_type.t = VT_INT;

    /* plain 'char' signedness is a per-target/option choice */
    char_type.t = VT_BYTE;
    if (s1->char_is_unsigned)
        char_type.t |= VT_UNSIGNED;
    char_pointer_type = char_type;
    mk_pointer(&char_pointer_type);

    /* K&R-style "old" function type: cdecl, unspecified parameters */
    func_old_type.t = VT_FUNC;
    func_old_type.ref = sym_push(SYM_FIELD, &int_type, 0, 0);
    func_old_type.ref->f.func_call = FUNC_CDECL;
    func_old_type.ref->f.func_type = FUNC_OLD;
#ifdef precedence_parser
    init_prec();
#endif
    cstr_new(&initstr);
}
|
2017-02-21 01:58:08 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* compile one translation unit: reset per-file codegen state, then
   parse external declarations until EOF.  Returns 0 on success
   (errors are reported via the tcc_error machinery). */
ST_FUNC int tccgen_compile(TCCState *s1)
{
    cur_text_section = NULL;
    funcname = "";
    func_ind = -1;
    anon_sym = SYM_FIRST_ANOM; /* anonymous symbols start above real tokens */
    const_wanted = 0;
    nocode_wanted = DATA_ONLY_WANTED; /* no code outside of functions */
    local_scope = 0;
    /* bit 0: debug info, bit 1: test coverage */
    debug_modes = (s1->do_debug ? 1 : 0) | s1->test_coverage << 1;

    tcc_debug_start(s1);
    tcc_tcov_start (s1);
#ifdef TCC_TARGET_ARM
    arm_init(s1);
#endif
#ifdef INC_DEBUG
    printf("%s: **** new file\n", file->filename);
#endif
    parse_flags = PARSE_FLAG_PREPROCESS | PARSE_FLAG_TOK_NUM | PARSE_FLAG_TOK_STR;
    next();                  /* prime the tokenizer */
    decl(VT_CONST);          /* parse all top-level declarations */
    gen_inline_functions(s1);
    check_vstack();          /* assert value stack is balanced */
    /* end of translation unit info */
    tcc_debug_end(s1);
    tcc_tcov_end(s1);
    return 0;
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* release all codegen state allocated during compilation
   (counterpart of tccgen_init/tccgen_compile) */
ST_FUNC void tccgen_finish(TCCState *s1)
{
    cstr_free(&initstr);
    free_inline_functions(s1);
    sym_pop(&global_stack, NULL, 0);
    sym_pop(&local_stack, NULL, 0);
    /* free preprocessor macros */
    free_defines(NULL);
    /* free sym_pools */
    dynarray_reset(&sym_pools, &nb_sym_pools);
    /* the free list pointed into the pools just released */
    sym_free_first = NULL;
}
|
|
|
|
|
2017-04-04 14:34:52 +08:00
|
|
|
/* ------------------------------------------------------------------------- */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Return the ELF symbol table entry backing 's', or NULL when the
   symbol (or its ELF slot index s->c) does not exist yet. */
ST_FUNC ElfSym *elfsym(Sym *s)
{
    ElfSym *base;

    if (!s || !s->c)
        return NULL;
    base = (ElfSym *)symtab_section->data;
    return base + s->c;
}
|
|
|
|
|
2017-05-08 12:38:09 +08:00
|
|
|
/* apply storage attributes to Elf symbol */
/* Sync visibility, binding (local/weak/global) and PE import/export
   flags from the compiler-side Sym onto its ELF symtab entry.
   No-op when the ELF symbol has not been created yet. */
ST_FUNC void update_storage(Sym *sym)
{
    ElfSym *esym;
    int sym_bind, old_sym_bind;

    esym = elfsym(sym);
    if (!esym)
        return;

    /* ELF visibility lives in the low bits of st_other */
    if (sym->a.visibility)
        esym->st_other = (esym->st_other & ~ELFW(ST_VISIBILITY)(-1))
            | sym->a.visibility;

    /* static and inline both get local binding; weak overrides global */
    if (sym->type.t & (VT_STATIC | VT_INLINE))
        sym_bind = STB_LOCAL;
    else if (sym->a.weak)
        sym_bind = STB_WEAK;
    else
        sym_bind = STB_GLOBAL;
    old_sym_bind = ELFW(ST_BIND)(esym->st_info);
    if (sym_bind != old_sym_bind) {
        /* rewrite binding, preserve the symbol type bits */
        esym->st_info = ELFW(ST_INFO)(sym_bind, ELFW(ST_TYPE)(esym->st_info));
    }

#ifdef TCC_TARGET_PE
    if (sym->a.dllimport)
        esym->st_other |= ST_PE_IMPORT;
    if (sym->a.dllexport)
        esym->st_other |= ST_PE_EXPORT;
#endif

#if 0
    printf("storage %s: bind=%c vis=%d exp=%d imp=%d\n",
        get_tok_str(sym->v, NULL),
        sym_bind == STB_WEAK ? 'w' : sym_bind == STB_LOCAL ? 'l' : 'g',
        sym->a.visibility,
        sym->a.dllexport,
        sym->a.dllimport
        );
#endif
}
|
|
|
|
|
2016-10-15 21:55:31 +08:00
|
|
|
/* ------------------------------------------------------------------------- */
|
|
|
|
/* update sym->c so that it points to an external symbol in section
   'section' with value 'value' */
/* On first use the ELF symbol is created: its type/binding are derived
   from the C type, and its name may be decorated (stdcall suffix,
   leading underscore) or overridden by an __asm__ label.  On later
   calls only value/size/section are refreshed. */

ST_FUNC void put_extern_sym2(Sym *sym, int sh_num,
                            addr_t value, unsigned long size,
                            int can_add_underscore)
{
    int sym_type, sym_bind, info, other, t;
    ElfSym *esym;
    const char *name;
    char buf1[256];

    if (!sym->c) {
        /* first time: create the ELF symbol */
        name = get_tok_str(sym->v, NULL);
        t = sym->type.t;
        if ((t & VT_BTYPE) == VT_FUNC) {
            sym_type = STT_FUNC;
        } else if ((t & VT_BTYPE) == VT_VOID) {
            sym_type = STT_NOTYPE;
            /* asm-defined functions carry VT_ASM_FUNC on a void type */
            if ((t & (VT_BTYPE|VT_ASM_FUNC)) == VT_ASM_FUNC)
                sym_type = STT_FUNC;
        } else {
            sym_type = STT_OBJECT;
        }
        if (t & (VT_STATIC | VT_INLINE))
            sym_bind = STB_LOCAL;
        else
            sym_bind = STB_GLOBAL;
        other = 0;

#ifdef TCC_TARGET_PE
        /* PE name decoration for stdcall: "_name@argsize" */
        if (sym_type == STT_FUNC && sym->type.ref) {
            Sym *ref = sym->type.ref;
            if (ref->a.nodecorate) {
                can_add_underscore = 0;
            }
            if (ref->f.func_call == FUNC_STDCALL && can_add_underscore) {
                sprintf(buf1, "_%s@%d", name, ref->f.func_args * PTR_SIZE);
                name = buf1;
                other |= ST_PE_STDCALL;
                can_add_underscore = 0;
            }
        }
#endif

        /* an explicit __asm__("name") rename is used verbatim */
        if (sym->asm_label) {
            name = get_tok_str(sym->asm_label, NULL);
            can_add_underscore = 0;
        }

        if (tcc_state->leading_underscore && can_add_underscore) {
            buf1[0] = '_';
            pstrcpy(buf1 + 1, sizeof(buf1) - 1, name);
            name = buf1;
        }

        info = ELFW(ST_INFO)(sym_bind, sym_type);
        sym->c = put_elf_sym(symtab_section, value, size, info, other, sh_num, name);

        if (debug_modes)
            tcc_debug_extern_sym(tcc_state, sym, sh_num, sym_bind, sym_type);

    } else {
        /* symbol already exists: just update its definition */
        esym = elfsym(sym);
        esym->st_value = value;
        esym->st_size = size;
        esym->st_shndx = sh_num;
    }
    /* re-apply binding/visibility attributes in either case */
    update_storage(sym);
}
|
|
|
|
|
2022-05-29 02:50:09 +08:00
|
|
|
/* Define/update the ELF symbol for 'sym' at 'value' in section 's'
   (NULL 's' means SHN_UNDEF).  Suppressed while code generation is
   off, unless real data output is still wanted and the target is not
   the current text section. */
ST_FUNC void put_extern_sym(Sym *sym, Section *s, addr_t value, unsigned long size)
{
    int sh_num = s ? s->sh_num : SHN_UNDEF;

    if (nocode_wanted && (NODATA_WANTED || (s && s == cur_text_section)))
        return;
    put_extern_sym2(sym, sh_num, value, size, 1);
}
|
|
|
|
|
|
|
|
/* add a new relocation entry to symbol 'sym' in section 's' */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void greloca(Section *s, Sym *sym, unsigned long offset, int type,
|
2016-10-15 21:55:31 +08:00
|
|
|
addr_t addend)
|
|
|
|
{
|
|
|
|
int c = 0;
|
2016-12-19 00:23:33 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
if (nocode_wanted && s == cur_text_section)
|
2016-12-19 00:23:33 +08:00
|
|
|
return;
|
|
|
|
|
2016-10-15 21:55:31 +08:00
|
|
|
if (sym) {
|
|
|
|
if (0 == sym->c)
|
2021-10-22 13:39:54 +08:00
|
|
|
put_extern_sym(sym, NULL, 0, 0);
|
2016-10-15 21:55:31 +08:00
|
|
|
c = sym->c;
|
|
|
|
}
|
2016-12-19 00:23:33 +08:00
|
|
|
|
2016-10-15 21:55:31 +08:00
|
|
|
/* now we can add ELF relocation info */
|
|
|
|
put_elf_reloca(symtab_section, s, offset, type, c, addend);
|
|
|
|
}
|
|
|
|
|
2017-05-13 14:59:06 +08:00
|
|
|
#if PTR_SIZE == 4
/* Convenience wrapper: relocation without an addend (32-bit targets). */
ST_FUNC void greloc(Section *s, Sym *sym, unsigned long offset, int type)
{
    greloca(s, sym, offset, type, 0);
}
#endif
|
2016-10-15 21:55:31 +08:00
|
|
|
|
2009-12-20 08:53:49 +08:00
|
|
|
/* ------------------------------------------------------------------------- */
|
|
|
|
/* symbol allocator */
|
2021-10-22 13:39:54 +08:00
|
|
|
static Sym *__sym_malloc(void)
|
2009-12-20 08:53:49 +08:00
|
|
|
{
|
|
|
|
Sym *sym_pool, *sym, *last_sym;
|
|
|
|
int i;
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
sym_pool = tcc_malloc(SYM_POOL_NB * sizeof(Sym));
|
|
|
|
dynarray_add(&sym_pools, &nb_sym_pools, sym_pool);
|
2009-12-20 08:53:49 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
last_sym = sym_free_first;
|
2009-12-20 08:53:49 +08:00
|
|
|
sym = sym_pool;
|
|
|
|
for(i = 0; i < SYM_POOL_NB; i++) {
|
|
|
|
sym->next = last_sym;
|
|
|
|
last_sym = sym;
|
|
|
|
sym++;
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
sym_free_first = last_sym;
|
2009-12-20 08:53:49 +08:00
|
|
|
return last_sym;
|
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static inline Sym *sym_malloc(void)
|
2009-12-20 08:53:49 +08:00
|
|
|
{
|
|
|
|
Sym *sym;
|
2016-08-15 11:09:31 +08:00
|
|
|
#ifndef SYM_DEBUG
|
2021-10-22 13:39:54 +08:00
|
|
|
sym = sym_free_first;
|
2009-12-20 08:53:49 +08:00
|
|
|
if (!sym)
|
2021-10-22 13:39:54 +08:00
|
|
|
sym = __sym_malloc();
|
|
|
|
sym_free_first = sym->next;
|
2009-12-20 08:53:49 +08:00
|
|
|
return sym;
|
2016-08-15 11:09:31 +08:00
|
|
|
#else
|
2021-10-22 13:39:54 +08:00
|
|
|
sym = tcc_malloc(sizeof(Sym));
|
2016-08-15 11:09:31 +08:00
|
|
|
return sym;
|
|
|
|
#endif
|
2009-12-20 08:53:49 +08:00
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Return a Sym to the allocator: push it back on the pool free list,
   or really free it when SYM_DEBUG is active. */
ST_INLN void sym_free(Sym *sym)
{
#ifndef SYM_DEBUG
    sym->next = sym_free_first;
    sym_free_first = sym;
#else
    tcc_free(sym);
#endif
}
|
|
|
|
|
|
|
|
/* push, without hashing */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Push a new symbol with value 'v', type bits 't' and constant 'c'
   onto the stack '*ps', without touching the token hash lists. */
ST_FUNC Sym *sym_push2(Sym **ps, int v, int t, int c)
{
    Sym *sym = sym_malloc();

    memset(sym, 0, sizeof *sym);
    sym->v = v;
    sym->type.t = t;
    sym->c = c;
    /* link at the top of the requested stack */
    sym->prev = *ps;
    *ps = sym;
    return sym;
}
|
|
|
|
|
|
|
|
/* find a symbol and return its associated structure. 's' is the top
|
|
|
|
of the symbol stack */
|
|
|
|
/* Walk the symbol stack starting at 's' looking for token value 'v'.
   A symbol with v == -1 acts as a search barrier and stops the walk.
   Returns NULL when not found. */
ST_FUNC Sym *sym_find2(Sym *s, int v)
{
    for (; s; s = s->prev) {
        if (s->v == v)
            return s;
        if (s->v == -1)
            break;          /* barrier symbol: stop searching */
    }
    return NULL;
}
|
|
|
|
|
|
|
|
/* structure lookup */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Look up the struct/union/enum tag bound to token 'v', or NULL. */
ST_INLN Sym *struct_find(int v)
{
    unsigned idx = (unsigned)(v - TOK_IDENT);

    if (idx >= (unsigned)(tok_ident - TOK_IDENT))
        return NULL;                    /* not a user identifier token */
    return table_ident[idx]->sym_struct;
}
|
|
|
|
|
|
|
|
/* find an identifier */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Look up the ordinary identifier bound to token 'v', or NULL. */
ST_INLN Sym *sym_find(int v)
{
    unsigned idx = (unsigned)(v - TOK_IDENT);

    if (idx >= (unsigned)(tok_ident - TOK_IDENT))
        return NULL;                    /* not a user identifier token */
    return table_ident[idx]->sym_identifier;
}
|
|
|
|
|
2019-03-09 00:58:25 +08:00
|
|
|
/* Scope level of symbol 's'.  Enumerator constants live in the scope
   of their enum type, so follow the type reference for those. */
static int sym_scope(Sym *s)
{
    return IS_ENUM_VAL(s->type.t) ? s->type.ref->sym_scope : s->sym_scope;
}
|
|
|
|
|
2009-12-20 08:53:49 +08:00
|
|
|
/* push a given symbol on the symbol stack */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* push a given symbol on the symbol stack */
/* Pushes onto the local stack when inside a function, otherwise onto
   the global stack, and additionally links named (non-field,
   non-anonymous) symbols into the per-token struct/identifier list so
   sym_find()/struct_find() can reach them in O(1).
   Raises an error on redeclaration within the same scope. */
ST_FUNC Sym *sym_push(int v, CType *type, int r, int c)
{
    Sym *s, **ps;
    TokenSym *ts;

    /* choose the active stack: local while compiling a function body */
    if (local_stack)
        ps = &local_stack;
    else
        ps = &global_stack;
    s = sym_push2(ps, v, type->t, c);
    s->type.ref = type->ref;
    s->r = r;
    /* don't record fields or anonymous symbols */
    /* XXX: simplify */
    if (!(v & SYM_FIELD) && (v & ~SYM_STRUCT) < SYM_FIRST_ANOM) {
        /* record symbol in token array */
        ts = table_ident[(v & ~SYM_STRUCT) - TOK_IDENT];
        /* tags (struct/union/enum) and ordinary identifiers live in
           separate namespaces, hence separate per-token lists */
        if (v & SYM_STRUCT)
            ps = &ts->sym_struct;
        else
            ps = &ts->sym_identifier;
        s->prev_tok = *ps;
        *ps = s;
        s->sym_scope = local_scope;
        /* same name already bound at the same scope level -> error */
        if (s->prev_tok && sym_scope(s->prev_tok) == s->sym_scope)
            tcc_error("redeclaration of '%s'",
                      get_tok_str(v & ~SYM_STRUCT, NULL));
    }
    return s;
}
|
|
|
|
|
|
|
|
/* push a global identifier */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* push a global identifier */
/* Creates a symbol on the global stack (r = VT_CONST|VT_SYM) and, for
   named symbols, splices it into the token's identifier list *below*
   any local-scope entries, so it becomes visible once the locals are
   popped; this matters when called from inline asm inside a function. */
ST_FUNC Sym *global_identifier_push(int v, int t, int c)
{
    Sym *s, **ps;
    s = sym_push2(&global_stack, v, t, c);
    s->r = VT_CONST | VT_SYM;
    /* don't record anonymous symbol */
    if (v < SYM_FIRST_ANOM) {
        ps = &table_ident[v - TOK_IDENT]->sym_identifier;
        /* modify the top most local identifier, so that sym_identifier will
           point to 's' when popped; happens when called from inline asm */
        /* skip past entries with a non-zero (local) scope */
        while (*ps != NULL && (*ps)->sym_scope)
            ps = &(*ps)->prev_tok;
        s->prev_tok = *ps;
        *ps = s;
    }
    return s;
}
|
|
|
|
|
2016-08-15 11:09:31 +08:00
|
|
|
/* pop symbols until top reaches 'b'. If KEEP is non-zero don't really
|
|
|
|
pop them yet from the list, but do remove them from the token array. */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* pop symbols until top reaches 'b'. If KEEP is non-zero don't really
   pop them yet from the list, but do remove them from the token array. */
/* With keep != 0 the Sym objects and the stack linkage stay intact
   (used when a scope's symbols must survive a bit longer, e.g. for
   debug info), but name lookup through the token table is undone. */
ST_FUNC void sym_pop(Sym **ptop, Sym *b, int keep)
{
    Sym *s, *ss, **ps;
    TokenSym *ts;
    int v;

    s = *ptop;
    while(s != b) {
        ss = s->prev;          /* save link before a possible sym_free */
        v = s->v;
        /* remove symbol in token array */
        /* XXX: simplify */
        if (!(v & SYM_FIELD) && (v & ~SYM_STRUCT) < SYM_FIRST_ANOM) {
            ts = table_ident[(v & ~SYM_STRUCT) - TOK_IDENT];
            if (v & SYM_STRUCT)
                ps = &ts->sym_struct;
            else
                ps = &ts->sym_identifier;
            /* unlink: the popped symbol is always the list head */
            *ps = s->prev_tok;
        }
        if (!keep)
            sym_free(s);
        s = ss;
    }
    if (!keep)
        *ptop = b;
}
|
|
|
|
|
|
|
|
/* ------------------------------------------------------------------------- */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Flush a pending comparison result into a register before any other
   value is pushed onto the value stack. */
static void vcheck_cmp(void)
{
    /* cannot let cpu flags if other instruction are generated. Also
       avoid leaving VT_JMP anywhere except on the top of the stack
       because it would complicate the code generator.

       Don't do this when nocode_wanted.  vtop might come from
       !nocode_wanted regions (see 88_codeopt.c) and transforming
       it to a register without actually generating code is wrong
       as their value might still be used for real.  All values
       we push under nocode_wanted will eventually be popped
       again, so that the VT_CMP/VT_JMP value will be in vtop
       when code is unsuppressed again. */

    if (vtop->r == VT_CMP && !nocode_wanted)
        gv(RC_INT);
}
|
2017-02-09 02:45:31 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Push a new value of type 'type', storage 'r' and constant '*vc'
   onto the value stack. */
static void vsetc(CType *type, int r, CValue *vc)
{
    SValue *sv;

    if (vtop >= vstack + (VSTACK_SIZE - 1))
        tcc_error("memory full (vstack)");
    /* make sure pending CPU flags are materialized first */
    vcheck_cmp();
    sv = ++vtop;
    sv->type = *type;
    sv->r = r;
    sv->r2 = VT_CONST;
    sv->c = *vc;
    sv->sym = NULL;
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void vswap(void)
|
2017-02-09 02:45:31 +08:00
|
|
|
{
|
|
|
|
SValue tmp;
|
jump optimizations
This unifies VT_CMP with VT_JMP(i) by using mostly VT_CMP
with both a positive and a negative jump target list.
Such we can delay putting the non-inverted or inverted jump
until we can see which one is nore suitable (in most cases).
example:
if (a && b || c && d)
e = 0;
before this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 11 00 00 00 je 27 <main+0x27>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 84 05 00 00 00 je 27 <main+0x27>
22: e9 22 00 00 00 jmp 49 <main+0x49>
27: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
2a: 83 f8 00 cmp $0x0,%eax
2d: 0f 84 11 00 00 00 je 44 <main+0x44>
33: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
36: 83 f8 00 cmp $0x0,%eax
39: 0f 84 05 00 00 00 je 44 <main+0x44>
3f: e9 05 00 00 00 jmp 49 <main+0x49>
44: e9 08 00 00 00 jmp 51 <main+0x51>
49: b8 00 00 00 00 mov $0x0,%eax
4e: 89 45 ec mov %eax,0xffffffec(%ebp)
51: ...
with this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 0c 00 00 00 je 22 <main+0x22>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 85 18 00 00 00 jne 3a <main+0x3a>
22: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
25: 83 f8 00 cmp $0x0,%eax
28: 0f 84 14 00 00 00 je 42 <main+0x42>
2e: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
31: 83 f8 00 cmp $0x0,%eax
34: 0f 84 08 00 00 00 je 42 <main+0x42>
3a: b8 00 00 00 00 mov $0x0,%eax
3f: 89 45 ec mov %eax,0xffffffec(%ebp)
42: ...
2019-06-22 17:45:35 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
vcheck_cmp();
|
|
|
|
tmp = vtop[0];
|
|
|
|
vtop[0] = vtop[-1];
|
|
|
|
vtop[-1] = tmp;
|
2017-02-09 02:45:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* pop stack value */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* pop stack value */
/* Besides decrementing vtop this discards any machine state the value
   occupies: the x87 FP stack top on x86 targets, or pending jump
   chains of an unconsumed comparison. */
ST_FUNC void vpop(void)
{
    int v;
    v = vtop->r & VT_VALMASK;
#if defined(TCC_TARGET_I386) || defined(TCC_TARGET_X86_64)
    /* for x86, we need to pop the FP stack */
    if (v == TREG_ST0) {
        o(0xd8dd); /* fstp %st(0) */
    } else
#endif
    if (v == VT_CMP) {
        /* need to put correct jump if && or || without test */
        gsym(vtop->jtrue);
        gsym(vtop->jfalse);
    }
    vtop--;
}
|
|
|
|
|
2010-04-11 07:53:40 +08:00
|
|
|
/* push constant of type "type" with useless value */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Push a constant of type "type" whose value is irrelevant. */
static void vpush(CType *type)
{
    vset(type, VT_CONST, 0);
}
|
|
|
|
|
2019-12-17 01:51:28 +08:00
|
|
|
/* push arbitrary 64bit constant */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void vpush64(int ty, unsigned long long v)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
|
|
|
CValue cval;
|
2019-12-17 01:51:28 +08:00
|
|
|
CType ctype;
|
|
|
|
ctype.t = ty;
|
|
|
|
ctype.ref = NULL;
|
2009-05-06 02:18:10 +08:00
|
|
|
cval.i = v;
|
2021-10-22 13:39:54 +08:00
|
|
|
vsetc(&ctype, VT_CONST, &cval);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
|
2019-12-17 01:51:28 +08:00
|
|
|
/* push integer constant */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void vpushi(int v)
|
2012-04-16 07:13:25 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
vpush64(VT_INT, v);
|
2012-04-16 07:13:25 +08:00
|
|
|
}
|
|
|
|
|
2019-12-17 01:51:28 +08:00
|
|
|
/* push a pointer sized constant */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Push a pointer-sized (size_t) constant. */
static void vpushs(addr_t v)
{
    vpush64(VT_SIZE_T, v);
}
|
|
|
|
|
2013-02-09 02:07:11 +08:00
|
|
|
/* push long long constant */
|
2021-10-22 13:39:54 +08:00
|
|
|
static inline void vpushll(long long v)
|
2013-02-09 02:07:11 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
vpush64(VT_LLONG, v);
|
2013-02-09 02:07:11 +08:00
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Push a value of type 'type' with storage 'r' and integer constant 'v'. */
ST_FUNC void vset(CType *type, int r, int v)
{
    CValue c;

    c.i = v;
    vsetc(type, r, &c);
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void vseti(int r, int v)
|
2017-02-09 02:45:31 +08:00
|
|
|
{
|
|
|
|
CType type;
|
|
|
|
type.t = VT_INT;
|
2017-07-09 18:34:11 +08:00
|
|
|
type.ref = NULL;
|
2021-10-22 13:39:54 +08:00
|
|
|
vset(&type, r, v);
|
2017-02-09 02:45:31 +08:00
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Push a copy of the SValue '*v' onto the value stack. */
ST_FUNC void vpushv(SValue *v)
{
    if (vtop >= vstack + (VSTACK_SIZE - 1))
        tcc_error("memory full (vstack)");
    ++vtop;
    *vtop = *v;
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void vdup(void)
|
2017-02-09 02:45:31 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushv(vtop);
|
2017-02-09 02:45:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* rotate n first stack elements to the bottom
|
|
|
|
I1 ... In -> I2 ... In I1 [top is right]
|
|
|
|
*/
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void vrotb(int n)
|
2017-02-09 02:45:31 +08:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
SValue tmp;
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
vcheck_cmp();
|
|
|
|
tmp = vtop[-n + 1];
|
2017-02-09 02:45:31 +08:00
|
|
|
for(i=-n+1;i!=0;i++)
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop[i] = vtop[i+1];
|
|
|
|
vtop[0] = tmp;
|
2017-02-09 02:45:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* rotate the n elements before entry e towards the top
|
|
|
|
I1 ... In ... -> In I1 ... I(n-1) ... [top is right]
|
|
|
|
*/
|
2021-10-22 13:39:54 +08:00
|
|
|
/* rotate the n elements before entry e towards the top
   I1 ... In ... -> In I1 ... I(n-1) ... [top is right]
*/
ST_FUNC void vrote(SValue *e, int n)
{
    SValue top;
    int k;

    vcheck_cmp();
    top = *e;
    /* shift the n-1 entries below 'e' one slot up */
    for (k = 0; k < n - 1; k++)
        e[-k] = e[-k - 1];
    e[1 - n] = top;
}
|
|
|
|
|
|
|
|
/* rotate n first stack elements to the top
|
|
|
|
I1 ... In -> In I1 ... I(n-1) [top is right]
|
|
|
|
*/
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void vrott(int n)
|
2017-02-09 02:45:31 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
vrote(vtop, n);
|
2017-02-09 02:45:31 +08:00
|
|
|
}
|
|
|
|
|
jump optimizations
This unifies VT_CMP with VT_JMP(i) by using mostly VT_CMP
with both a positive and a negative jump target list.
Such we can delay putting the non-inverted or inverted jump
until we can see which one is nore suitable (in most cases).
example:
if (a && b || c && d)
e = 0;
before this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 11 00 00 00 je 27 <main+0x27>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 84 05 00 00 00 je 27 <main+0x27>
22: e9 22 00 00 00 jmp 49 <main+0x49>
27: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
2a: 83 f8 00 cmp $0x0,%eax
2d: 0f 84 11 00 00 00 je 44 <main+0x44>
33: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
36: 83 f8 00 cmp $0x0,%eax
39: 0f 84 05 00 00 00 je 44 <main+0x44>
3f: e9 05 00 00 00 jmp 49 <main+0x49>
44: e9 08 00 00 00 jmp 51 <main+0x51>
49: b8 00 00 00 00 mov $0x0,%eax
4e: 89 45 ec mov %eax,0xffffffec(%ebp)
51: ...
with this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 0c 00 00 00 je 22 <main+0x22>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 85 18 00 00 00 jne 3a <main+0x3a>
22: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
25: 83 f8 00 cmp $0x0,%eax
28: 0f 84 14 00 00 00 je 42 <main+0x42>
2e: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
31: 83 f8 00 cmp $0x0,%eax
34: 0f 84 08 00 00 00 je 42 <main+0x42>
3a: b8 00 00 00 00 mov $0x0,%eax
3f: 89 45 ec mov %eax,0xffffffec(%ebp)
42: ...
2019-06-22 17:45:35 +08:00
|
|
|
/* ------------------------------------------------------------------------- */
|
|
|
|
/* vtop->r = VT_CMP means CPU-flags have been set from comparison or test. */
|
|
|
|
|
|
|
|
/* called from generators to set the result from relational ops */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void vset_VT_CMP(int op)
|
jump optimizations
This unifies VT_CMP with VT_JMP(i) by using mostly VT_CMP
with both a positive and a negative jump target list.
Such we can delay putting the non-inverted or inverted jump
until we can see which one is nore suitable (in most cases).
example:
if (a && b || c && d)
e = 0;
before this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 11 00 00 00 je 27 <main+0x27>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 84 05 00 00 00 je 27 <main+0x27>
22: e9 22 00 00 00 jmp 49 <main+0x49>
27: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
2a: 83 f8 00 cmp $0x0,%eax
2d: 0f 84 11 00 00 00 je 44 <main+0x44>
33: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
36: 83 f8 00 cmp $0x0,%eax
39: 0f 84 05 00 00 00 je 44 <main+0x44>
3f: e9 05 00 00 00 jmp 49 <main+0x49>
44: e9 08 00 00 00 jmp 51 <main+0x51>
49: b8 00 00 00 00 mov $0x0,%eax
4e: 89 45 ec mov %eax,0xffffffec(%ebp)
51: ...
with this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 0c 00 00 00 je 22 <main+0x22>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 85 18 00 00 00 jne 3a <main+0x3a>
22: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
25: 83 f8 00 cmp $0x0,%eax
28: 0f 84 14 00 00 00 je 42 <main+0x42>
2e: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
31: 83 f8 00 cmp $0x0,%eax
34: 0f 84 08 00 00 00 je 42 <main+0x42>
3a: b8 00 00 00 00 mov $0x0,%eax
3f: 89 45 ec mov %eax,0xffffffec(%ebp)
42: ...
2019-06-22 17:45:35 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->r = VT_CMP;
|
|
|
|
vtop->cmp_op = op;
|
|
|
|
vtop->jfalse = 0;
|
|
|
|
vtop->jtrue = 0;
|
jump optimizations
This unifies VT_CMP with VT_JMP(i) by using mostly VT_CMP
with both a positive and a negative jump target list.
Such we can delay putting the non-inverted or inverted jump
until we can see which one is nore suitable (in most cases).
example:
if (a && b || c && d)
e = 0;
before this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 11 00 00 00 je 27 <main+0x27>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 84 05 00 00 00 je 27 <main+0x27>
22: e9 22 00 00 00 jmp 49 <main+0x49>
27: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
2a: 83 f8 00 cmp $0x0,%eax
2d: 0f 84 11 00 00 00 je 44 <main+0x44>
33: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
36: 83 f8 00 cmp $0x0,%eax
39: 0f 84 05 00 00 00 je 44 <main+0x44>
3f: e9 05 00 00 00 jmp 49 <main+0x49>
44: e9 08 00 00 00 jmp 51 <main+0x51>
49: b8 00 00 00 00 mov $0x0,%eax
4e: 89 45 ec mov %eax,0xffffffec(%ebp)
51: ...
with this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 0c 00 00 00 je 22 <main+0x22>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 85 18 00 00 00 jne 3a <main+0x3a>
22: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
25: 83 f8 00 cmp $0x0,%eax
28: 0f 84 14 00 00 00 je 42 <main+0x42>
2e: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
31: 83 f8 00 cmp $0x0,%eax
34: 0f 84 08 00 00 00 je 42 <main+0x42>
3a: b8 00 00 00 00 mov $0x0,%eax
3f: 89 45 ec mov %eax,0xffffffec(%ebp)
42: ...
2019-06-22 17:45:35 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* called once before asking generators to load VT_CMP to a register */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void vset_VT_JMP(void)
|
jump optimizations
This unifies VT_CMP with VT_JMP(i) by using mostly VT_CMP
with both a positive and a negative jump target list.
Such we can delay putting the non-inverted or inverted jump
until we can see which one is nore suitable (in most cases).
example:
if (a && b || c && d)
e = 0;
before this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 11 00 00 00 je 27 <main+0x27>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 84 05 00 00 00 je 27 <main+0x27>
22: e9 22 00 00 00 jmp 49 <main+0x49>
27: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
2a: 83 f8 00 cmp $0x0,%eax
2d: 0f 84 11 00 00 00 je 44 <main+0x44>
33: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
36: 83 f8 00 cmp $0x0,%eax
39: 0f 84 05 00 00 00 je 44 <main+0x44>
3f: e9 05 00 00 00 jmp 49 <main+0x49>
44: e9 08 00 00 00 jmp 51 <main+0x51>
49: b8 00 00 00 00 mov $0x0,%eax
4e: 89 45 ec mov %eax,0xffffffec(%ebp)
51: ...
with this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 0c 00 00 00 je 22 <main+0x22>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 85 18 00 00 00 jne 3a <main+0x3a>
22: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
25: 83 f8 00 cmp $0x0,%eax
28: 0f 84 14 00 00 00 je 42 <main+0x42>
2e: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
31: 83 f8 00 cmp $0x0,%eax
34: 0f 84 08 00 00 00 je 42 <main+0x42>
3a: b8 00 00 00 00 mov $0x0,%eax
3f: 89 45 ec mov %eax,0xffffffec(%ebp)
42: ...
2019-06-22 17:45:35 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
int op = vtop->cmp_op;
|
2020-01-21 21:23:57 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
if (vtop->jtrue || vtop->jfalse) {
|
jump optimizations
This unifies VT_CMP with VT_JMP(i) by using mostly VT_CMP
with both a positive and a negative jump target list.
Such we can delay putting the non-inverted or inverted jump
until we can see which one is nore suitable (in most cases).
example:
if (a && b || c && d)
e = 0;
before this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 11 00 00 00 je 27 <main+0x27>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 84 05 00 00 00 je 27 <main+0x27>
22: e9 22 00 00 00 jmp 49 <main+0x49>
27: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
2a: 83 f8 00 cmp $0x0,%eax
2d: 0f 84 11 00 00 00 je 44 <main+0x44>
33: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
36: 83 f8 00 cmp $0x0,%eax
39: 0f 84 05 00 00 00 je 44 <main+0x44>
3f: e9 05 00 00 00 jmp 49 <main+0x49>
44: e9 08 00 00 00 jmp 51 <main+0x51>
49: b8 00 00 00 00 mov $0x0,%eax
4e: 89 45 ec mov %eax,0xffffffec(%ebp)
51: ...
with this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 0c 00 00 00 je 22 <main+0x22>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 85 18 00 00 00 jne 3a <main+0x3a>
22: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
25: 83 f8 00 cmp $0x0,%eax
28: 0f 84 14 00 00 00 je 42 <main+0x42>
2e: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
31: 83 f8 00 cmp $0x0,%eax
34: 0f 84 08 00 00 00 je 42 <main+0x42>
3a: b8 00 00 00 00 mov $0x0,%eax
3f: 89 45 ec mov %eax,0xffffffec(%ebp)
42: ...
2019-06-22 17:45:35 +08:00
|
|
|
/* we need to jump to 'mov $0,%R' or 'mov $1,%R' */
|
|
|
|
int inv = op & (op < 2); /* small optimization */
|
2021-10-22 13:39:54 +08:00
|
|
|
vseti(VT_JMP+inv, gvtst(inv, 0));
|
jump optimizations
This unifies VT_CMP with VT_JMP(i) by using mostly VT_CMP
with both a positive and a negative jump target list.
Such we can delay putting the non-inverted or inverted jump
until we can see which one is nore suitable (in most cases).
example:
if (a && b || c && d)
e = 0;
before this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 11 00 00 00 je 27 <main+0x27>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 84 05 00 00 00 je 27 <main+0x27>
22: e9 22 00 00 00 jmp 49 <main+0x49>
27: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
2a: 83 f8 00 cmp $0x0,%eax
2d: 0f 84 11 00 00 00 je 44 <main+0x44>
33: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
36: 83 f8 00 cmp $0x0,%eax
39: 0f 84 05 00 00 00 je 44 <main+0x44>
3f: e9 05 00 00 00 jmp 49 <main+0x49>
44: e9 08 00 00 00 jmp 51 <main+0x51>
49: b8 00 00 00 00 mov $0x0,%eax
4e: 89 45 ec mov %eax,0xffffffec(%ebp)
51: ...
with this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 0c 00 00 00 je 22 <main+0x22>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 85 18 00 00 00 jne 3a <main+0x3a>
22: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
25: 83 f8 00 cmp $0x0,%eax
28: 0f 84 14 00 00 00 je 42 <main+0x42>
2e: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
31: 83 f8 00 cmp $0x0,%eax
34: 0f 84 08 00 00 00 je 42 <main+0x42>
3a: b8 00 00 00 00 mov $0x0,%eax
3f: 89 45 ec mov %eax,0xffffffec(%ebp)
42: ...
2019-06-22 17:45:35 +08:00
|
|
|
} else {
|
|
|
|
/* otherwise convert flags (rsp. 0/1) to register */
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.i = op;
|
jump optimizations
This unifies VT_CMP with VT_JMP(i) by using mostly VT_CMP
with both a positive and a negative jump target list.
Such we can delay putting the non-inverted or inverted jump
until we can see which one is nore suitable (in most cases).
example:
if (a && b || c && d)
e = 0;
before this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 11 00 00 00 je 27 <main+0x27>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 84 05 00 00 00 je 27 <main+0x27>
22: e9 22 00 00 00 jmp 49 <main+0x49>
27: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
2a: 83 f8 00 cmp $0x0,%eax
2d: 0f 84 11 00 00 00 je 44 <main+0x44>
33: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
36: 83 f8 00 cmp $0x0,%eax
39: 0f 84 05 00 00 00 je 44 <main+0x44>
3f: e9 05 00 00 00 jmp 49 <main+0x49>
44: e9 08 00 00 00 jmp 51 <main+0x51>
49: b8 00 00 00 00 mov $0x0,%eax
4e: 89 45 ec mov %eax,0xffffffec(%ebp)
51: ...
with this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 0c 00 00 00 je 22 <main+0x22>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 85 18 00 00 00 jne 3a <main+0x3a>
22: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
25: 83 f8 00 cmp $0x0,%eax
28: 0f 84 14 00 00 00 je 42 <main+0x42>
2e: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
31: 83 f8 00 cmp $0x0,%eax
34: 0f 84 08 00 00 00 je 42 <main+0x42>
3a: b8 00 00 00 00 mov $0x0,%eax
3f: 89 45 ec mov %eax,0xffffffec(%ebp)
42: ...
2019-06-22 17:45:35 +08:00
|
|
|
if (op < 2) /* doesn't seem to happen */
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->r = VT_CONST;
|
jump optimizations
This unifies VT_CMP with VT_JMP(i) by using mostly VT_CMP
with both a positive and a negative jump target list.
Such we can delay putting the non-inverted or inverted jump
until we can see which one is nore suitable (in most cases).
example:
if (a && b || c && d)
e = 0;
before this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 11 00 00 00 je 27 <main+0x27>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 84 05 00 00 00 je 27 <main+0x27>
22: e9 22 00 00 00 jmp 49 <main+0x49>
27: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
2a: 83 f8 00 cmp $0x0,%eax
2d: 0f 84 11 00 00 00 je 44 <main+0x44>
33: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
36: 83 f8 00 cmp $0x0,%eax
39: 0f 84 05 00 00 00 je 44 <main+0x44>
3f: e9 05 00 00 00 jmp 49 <main+0x49>
44: e9 08 00 00 00 jmp 51 <main+0x51>
49: b8 00 00 00 00 mov $0x0,%eax
4e: 89 45 ec mov %eax,0xffffffec(%ebp)
51: ...
with this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 0c 00 00 00 je 22 <main+0x22>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 85 18 00 00 00 jne 3a <main+0x3a>
22: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
25: 83 f8 00 cmp $0x0,%eax
28: 0f 84 14 00 00 00 je 42 <main+0x42>
2e: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
31: 83 f8 00 cmp $0x0,%eax
34: 0f 84 08 00 00 00 je 42 <main+0x42>
3a: b8 00 00 00 00 mov $0x0,%eax
3f: 89 45 ec mov %eax,0xffffffec(%ebp)
42: ...
2019-06-22 17:45:35 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set CPU Flags, doesn't yet jump */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Append jump target 't' to the pending true- or false-jump list of the
   value on top of the stack, converting it to a VT_CMP first if needed.
   'inv' selects the false list (non-zero) or the true list (zero).
   No jump instruction is emitted here; that is deferred to gvtst(). */
static void gvtst_set(int inv, int t)
{
    int *p;

    /* if vtop does not already hold comparison flags, compare it
       against zero to obtain one (gen_op may constant-fold, in which
       case the result stays VT_CONST and is converted explicitly) */
    if (vtop->r != VT_CMP) {
        vpushi(0);
        gen_op(TOK_NE);
        if (vtop->r != VT_CMP) /* must be VT_CONST then */
            vset_VT_CMP(vtop->c.i != 0);
    }

    /* queue 't' on the list matching the requested polarity */
    p = inv ? &vtop->jfalse : &vtop->jtrue;
    *p = gjmp_append(*p, t);
}
|
|
|
|
|
|
|
|
/* Generate value test
|
|
|
|
*
|
|
|
|
* Generate a test for any value (jump, comparison and integers) */
|
2021-10-22 13:39:54 +08:00
|
|
|
static int gvtst(int inv, int t)
{
    int op, x, u;

    /* normalize vtop to VT_CMP and queue 't' on the proper jump list */
    gvtst_set(inv, t);
    t = vtop->jtrue, u = vtop->jfalse;
    if (inv)
        x = u, u = t, t = x;   /* swap the two jump lists */
    op = vtop->cmp_op;

    /* jump to the wanted target */
    if (op > 1)
        /* a real comparison operator: emit a conditional jump,
           inverted if 'inv' is set */
        t = gjmp_cond(op ^ inv, t);
    else if (op != inv)
        /* condition is the constant 0/1: jump unconditionally
           when it disagrees with the requested polarity */
        t = gjmp(t);
    /* resolve complementary jumps to here */
    gsym(u);

    /* the tested value is consumed */
    vtop--;
    return t;
}
|
|
|
|
|
2019-12-17 01:51:28 +08:00
|
|
|
/* generate a zero or nozero test */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void gen_test_zero(int op)
|
2019-12-17 01:51:28 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
if (vtop->r == VT_CMP) {
|
2019-12-17 01:51:28 +08:00
|
|
|
int j;
|
|
|
|
if (op == TOK_EQ) {
|
2021-10-22 13:39:54 +08:00
|
|
|
j = vtop->jfalse;
|
|
|
|
vtop->jfalse = vtop->jtrue;
|
|
|
|
vtop->jtrue = j;
|
|
|
|
vtop->cmp_op ^= 1;
|
2019-12-17 01:51:28 +08:00
|
|
|
}
|
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushi(0);
|
|
|
|
gen_op(op);
|
2019-12-17 01:51:28 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
jump optimizations
This unifies VT_CMP with VT_JMP(i) by using mostly VT_CMP
with both a positive and a negative jump target list.
Such we can delay putting the non-inverted or inverted jump
until we can see which one is more suitable (in most cases).
example:
if (a && b || c && d)
e = 0;
before this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 11 00 00 00 je 27 <main+0x27>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 84 05 00 00 00 je 27 <main+0x27>
22: e9 22 00 00 00 jmp 49 <main+0x49>
27: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
2a: 83 f8 00 cmp $0x0,%eax
2d: 0f 84 11 00 00 00 je 44 <main+0x44>
33: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
36: 83 f8 00 cmp $0x0,%eax
39: 0f 84 05 00 00 00 je 44 <main+0x44>
3f: e9 05 00 00 00 jmp 49 <main+0x49>
44: e9 08 00 00 00 jmp 51 <main+0x51>
49: b8 00 00 00 00 mov $0x0,%eax
4e: 89 45 ec mov %eax,0xffffffec(%ebp)
51: ...
with this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 0c 00 00 00 je 22 <main+0x22>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 85 18 00 00 00 jne 3a <main+0x3a>
22: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
25: 83 f8 00 cmp $0x0,%eax
28: 0f 84 14 00 00 00 je 42 <main+0x42>
2e: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
31: 83 f8 00 cmp $0x0,%eax
34: 0f 84 08 00 00 00 je 42 <main+0x42>
3a: b8 00 00 00 00 mov $0x0,%eax
3f: 89 45 ec mov %eax,0xffffffec(%ebp)
42: ...
2019-06-22 17:45:35 +08:00
|
|
|
/* ------------------------------------------------------------------------- */
|
2014-01-12 06:42:58 +08:00
|
|
|
/* push a symbol value of TYPE */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void vpushsym(CType *type, Sym *sym)
{
    CValue zero;

    /* push a constant 0 carrying the VT_SYM relocation flag,
       then attach the symbol it refers to */
    zero.i = 0;
    vsetc(type, VT_CONST | VT_SYM, &zero);
    vtop->sym = sym;
}
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* Return a static symbol pointing to a section */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC Sym *get_sym_ref(CType *type, Section *sec, unsigned long offset, unsigned long size)
{
    /* create a fresh anonymous symbol of the requested type ... */
    Sym *sym = sym_push(anon_sym++, type, VT_CONST | VT_SYM, 0);

    /* ... make it static and bind it to sec+offset */
    sym->type.t |= VT_STATIC;
    put_extern_sym(sym, sec, offset, size);
    return sym;
}
|
|
|
|
|
|
|
|
/* push a reference to a section offset by adding a dummy symbol */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void vpush_ref(CType *type, Section *sec, unsigned long offset, unsigned long size)
{
    /* anonymous static symbol pointing into 'sec' at 'offset' */
    Sym *sym = get_sym_ref(type, sec, offset, size);
    vpushsym(type, sym);
}
|
|
|
|
|
|
|
|
/* define a new external reference to a symbol 'v' of type 'u' */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC Sym *external_global_sym(int v, CType *type)
{
    Sym *s;

    s = sym_find(v);
    if (!s) {
        /* push forward reference */
        s = global_identifier_push(v, type->t | VT_EXTERN, 0);
        s->type.ref = type->ref;
    } else if (IS_ASM_SYM(s)) {
        /* previously only known from asm: adopt the C type, keeping an
           existing VT_EXTERN flag, and refresh the output symbol */
        s->type.t = type->t | (s->type.t & VT_EXTERN);
        s->type.ref = type->ref;
        update_storage(s);
    }
    return s;
}
|
|
|
|
|
2020-11-24 03:15:16 +08:00
|
|
|
/* create an external reference with no specific type similar to asm labels.
|
|
|
|
This avoids type conflicts if the symbol is used from C too */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC Sym *external_helper_sym(int v)
{
    /* VT_ASM_FUNC gives the helper an untyped, asm-label-like
       declaration, avoiding type conflicts with C-level uses */
    CType ct;

    ct.t = VT_ASM_FUNC;
    ct.ref = NULL;
    return external_global_sym(v, &ct);
}
|
|
|
|
|
|
|
|
/* push a reference to a helper function (such as memmove) */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void vpush_helper_func(int v)
|
2020-11-24 03:15:16 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushsym(&func_old_type, external_helper_sym(v));
|
2020-11-24 03:15:16 +08:00
|
|
|
}
|
|
|
|
|
2019-01-01 05:00:31 +08:00
|
|
|
/* Merge symbol attributes. */
|
|
|
|
static void merge_symattr(struct SymAttr *sa, struct SymAttr *sa1)
|
|
|
|
{
|
|
|
|
if (sa1->aligned && !sa->aligned)
|
|
|
|
sa->aligned = sa1->aligned;
|
|
|
|
sa->packed |= sa1->packed;
|
|
|
|
sa->weak |= sa1->weak;
|
|
|
|
if (sa1->visibility != STV_DEFAULT) {
|
|
|
|
int vis = sa->visibility;
|
|
|
|
if (vis == STV_DEFAULT
|
|
|
|
|| vis > sa1->visibility)
|
|
|
|
vis = sa1->visibility;
|
|
|
|
sa->visibility = vis;
|
|
|
|
}
|
|
|
|
sa->dllexport |= sa1->dllexport;
|
|
|
|
sa->nodecorate |= sa1->nodecorate;
|
|
|
|
sa->dllimport |= sa1->dllimport;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Merge function attributes. */
|
|
|
|
static void merge_funcattr(struct FuncAttr *fa, struct FuncAttr *fa1)
|
|
|
|
{
|
|
|
|
if (fa1->func_call && !fa->func_call)
|
|
|
|
fa->func_call = fa1->func_call;
|
|
|
|
if (fa1->func_type && !fa->func_type)
|
|
|
|
fa->func_type = fa1->func_type;
|
|
|
|
if (fa1->func_args && !fa->func_args)
|
|
|
|
fa->func_args = fa1->func_args;
|
2020-05-13 17:14:53 +08:00
|
|
|
if (fa1->func_noreturn)
|
|
|
|
fa->func_noreturn = 1;
|
|
|
|
if (fa1->func_ctor)
|
|
|
|
fa->func_ctor = 1;
|
|
|
|
if (fa1->func_dtor)
|
|
|
|
fa->func_dtor = 1;
|
2019-01-01 05:00:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Merge attributes. */
|
|
|
|
/* Merge a full attribute definition 'ad1' into 'ad'. */
static void merge_attr(AttributeDef *ad, AttributeDef *ad1)
{
    /* symbol- and function-level attribute sets first */
    merge_symattr(&ad->a, &ad1->a);
    merge_funcattr(&ad->f, &ad1->f);

    /* pointer-valued attributes: ad1, when set, takes precedence */
    if (ad1->section)
        ad->section = ad1->section;
    if (ad1->alias_target)
        ad->alias_target = ad1->alias_target;
    if (ad1->asm_label)
        ad->asm_label = ad1->asm_label;
    if (ad1->attr_mode)
        ad->attr_mode = ad1->attr_mode;
}
|
|
|
|
|
2017-12-04 03:43:48 +08:00
|
|
|
/* Merge some type attributes. */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Reconcile a redeclaration: merge the new type 'type' into the
   previously recorded symbol 'sym', diagnosing invalid redefinitions. */
static void patch_type(Sym *sym, CType *type)
{
    /* a non-extern redeclaration (or an enum value) defines the symbol:
       reject a second definition, otherwise drop VT_EXTERN */
    if (!(type->t & VT_EXTERN) || IS_ENUM_VAL(sym->type.t)) {
        if (!(sym->type.t & VT_EXTERN))
            tcc_error("redefinition of '%s'", get_tok_str(sym->v, NULL));
        sym->type.t &= ~VT_EXTERN;
    }

    if (IS_ASM_SYM(sym)) {
        /* stay static if both are static */
        sym->type.t = type->t & (sym->type.t | ~VT_STATIC);
        sym->type.ref = type->ref;
    }

    if (!is_compatible_types(&sym->type, type)) {
        tcc_error("incompatible types for redefinition of '%s'",
                  get_tok_str(sym->v, NULL));

    } else if ((sym->type.t & VT_BTYPE) == VT_FUNC) {
        int static_proto = sym->type.t & VT_STATIC;
        /* warn if static follows non-static function declaration */
        if ((type->t & VT_STATIC) && !static_proto
            /* XXX this test for inline shouldn't be here.  Until we
               implement gnu-inline mode again it silences a warning for
               mingw caused by our workarounds. */
            && !((type->t | sym->type.t) & VT_INLINE))
            tcc_warning("static storage ignored for redefinition of '%s'",
                get_tok_str(sym->v, NULL));

        /* set 'inline' if both agree or if one has static */
        if ((type->t | sym->type.t) & VT_INLINE) {
            if (!((type->t ^ sym->type.t) & VT_INLINE)
             || ((type->t | sym->type.t) & VT_STATIC))
                static_proto |= VT_INLINE;
        }

        if (0 == (type->t & VT_EXTERN)) {
            /* keep the function attributes already collected on the
               previous declaration and merge them back afterwards */
            struct FuncAttr f = sym->type.ref->f;
            /* put complete type, use static from prototype */
            sym->type.t = (type->t & ~(VT_STATIC|VT_INLINE)) | static_proto;
            sym->type.ref = type->ref;
            merge_funcattr(&sym->type.ref->f, &f);
        } else {
            /* clear VT_INLINE unless static_proto carries it
               (precedence: (~VT_INLINE) | static_proto) */
            sym->type.t &= ~VT_INLINE | static_proto;
        }

        /* a prototype with parameter info supersedes an old-style one */
        if (sym->type.ref->f.func_type == FUNC_OLD
            && type->ref->f.func_type != FUNC_OLD) {
            sym->type.ref = type->ref;
        }

    } else {
        if ((sym->type.t & VT_ARRAY) && type->ref->c >= 0) {
            /* set array size if it was omitted in extern declaration */
            sym->type.ref->c = type->ref->c;
        }
        if ((type->t ^ sym->type.t) & VT_STATIC)
            tcc_warning("storage mismatch for redefinition of '%s'",
                get_tok_str(sym->v, NULL));
    }
}
|
|
|
|
|
2017-04-04 14:34:52 +08:00
|
|
|
/* Merge some storage attributes. */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void patch_storage(Sym *sym, AttributeDef *ad, CType *type)
{
    /* first reconcile the C type, when one was given */
    if (type)
        patch_type(sym, type);

#ifdef TCC_TARGET_PE
    /* dllimport must not change between declarations */
    if (sym->a.dllimport != ad->a.dllimport)
        tcc_error("incompatible dll linkage for redefinition of '%s'",
            get_tok_str(sym->v, NULL));
#endif
    merge_symattr(&sym->a, &ad->a);
    if (ad->asm_label)
        sym->asm_label = ad->asm_label;
    /* propagate the merged attributes to the output symbol */
    update_storage(sym);
}
|
|
|
|
|
2019-06-22 10:00:52 +08:00
|
|
|
/* copy sym to other stack */
|
2021-10-22 13:39:54 +08:00
|
|
|
static Sym *sym_copy(Sym *s0, Sym **ps)
|
2019-06-22 10:00:52 +08:00
|
|
|
{
|
|
|
|
Sym *s;
|
2021-10-22 13:39:54 +08:00
|
|
|
s = sym_malloc(), *s = *s0;
|
2019-06-22 10:00:52 +08:00
|
|
|
s->prev = *ps, *ps = s;
|
|
|
|
if (s->v < SYM_FIRST_ANOM) {
|
2021-10-22 13:39:54 +08:00
|
|
|
ps = &table_ident[s->v - TOK_IDENT]->sym_identifier;
|
2019-06-22 10:00:52 +08:00
|
|
|
s->prev_tok = *ps, *ps = s;
|
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2019-07-16 23:30:04 +08:00
|
|
|
/* copy s->type.ref to stack 'ps' for VT_FUNC and VT_PTR */
static void sym_copy_ref(Sym *s, Sym **ps)
{
    int bt = s->type.t & VT_BTYPE;
    /* only types that carry a ref chain needing relocation:
       functions (parameter list), pointers (pointee), and
       locally-scoped structs */
    if (bt == VT_FUNC || bt == VT_PTR || (bt == VT_STRUCT && s->sym_scope)) {
        Sym **sp = &s->type.ref;
        /* detach the original chain, then rebuild it on stack 'ps'
           one copied node at a time; 'sp' always points at the link
           to fill next */
        for (s = *sp, *sp = NULL; s; s = s->next) {
            Sym *s2 = sym_copy(s, ps);
            sp = &(*sp = s2)->next;
            /* recurse: the copied node's own type may hold refs too */
            sym_copy_ref(s2, ps);
        }
    }
}
|
|
|
|
|
2015-11-20 18:22:56 +08:00
|
|
|
/* define a new external reference to a symbol 'v' */
static Sym *external_sym(int v, CType *type, int r, AttributeDef *ad)
{
    Sym *s;

    /* look for global symbol, skipping any local declarations that
       shadow it (sym_scope != 0 means block-scoped) */
    s = sym_find(v);
    while (s && s->sym_scope)
        s = s->prev_tok;

    if (!s) {
        /* push forward reference */
        s = global_identifier_push(v, type->t, 0);
        s->r |= r;
        s->a = ad->a;
        s->asm_label = ad->asm_label;
        s->type.ref = type->ref;
        /* copy type to the global stack: the ref chain was built on
           the local stack and would be freed with the current scope */
        if (local_stack)
            sym_copy_ref(s, &global_stack);
    } else {
        /* already known: merge type and storage attributes */
        patch_storage(s, ad, type);
    }
    /* push variables on local_stack if any, so the name is visible
       with block scope (functions keep their global entry) */
    if (local_stack && (s->type.t & VT_BTYPE) != VT_FUNC)
        s = sym_copy(s, &local_stack);
    return s;
}
|
|
|
|
|
tccgen: arm/i386: save_reg_upstack
tccgen.c:gv() when loading long long from lvalue, before
was saving all registers which caused problems in the arm
function call register parameter preparation, as with
void foo(long long y, int x);
int main(void)
{
unsigned int *xx[1], x;
unsigned long long *yy[1], y;
foo(**yy, **xx);
return 0;
}
Now only the modified register is saved if necessary,
as in this case where it is used to store the result
of the post-inc:
long long *p, v, **pp;
v = 1;
p = &v;
p[0]++;
printf("another long long spill test : %lld\n", *p);
i386-gen.c :
- found a similar problem with TOK_UMULL caused by the
vstack juggle in tccgen:gen_opl()
(bug seen only when using EBX as 4th register)
2016-10-04 23:36:51 +08:00
|
|
|
/* save registers up to (vtop - n) stack entry */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void save_regs(int n)
|
tccgen: arm/i386: save_reg_upstack
tccgen.c:gv() when loading long long from lvalue, before
was saving all registers which caused problems in the arm
function call register parameter preparation, as with
void foo(long long y, int x);
int main(void)
{
unsigned int *xx[1], x;
unsigned long long *yy[1], y;
foo(**yy, **xx);
return 0;
}
Now only the modified register is saved if necessary,
as in this case where it is used to store the result
of the post-inc:
long long *p, v, **pp;
v = 1;
p = &v;
p[0]++;
printf("another long long spill test : %lld\n", *p);
i386-gen.c :
- found a similar problem with TOK_UMULL caused by the
vstack juggle in tccgen:gen_opl()
(bug seen only when using EBX as 4th register)
2016-10-04 23:36:51 +08:00
|
|
|
{
|
|
|
|
SValue *p, *p1;
|
2021-10-22 13:39:54 +08:00
|
|
|
for(p = vstack, p1 = vtop - n; p <= p1; p++)
|
|
|
|
save_reg(p->r);
|
tccgen: arm/i386: save_reg_upstack
tccgen.c:gv() when loading long long from lvalue, before
was saving all registers which caused problems in the arm
function call register parameter preparation, as with
void foo(long long y, int x);
int main(void)
{
unsigned int *xx[1], x;
unsigned long long *yy[1], y;
foo(**yy, **xx);
return 0;
}
Now only the modified register is saved if necessary,
as in this case where it is used to store the result
of the post-inc:
long long *p, v, **pp;
v = 1;
p = &v;
p[0]++;
printf("another long long spill test : %lld\n", *p);
i386-gen.c :
- found a similar problem with TOK_UMULL caused by the
vstack juggle in tccgen:gen_opl()
(bug seen only when using EBX as 4th register)
2016-10-04 23:36:51 +08:00
|
|
|
}
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* save r to the memory stack, and mark it as being free */
ST_FUNC void save_reg(int r)
{
    /* n == 0: consider the whole value stack up to vtop */
    save_reg_upstack(r, 0);
}
|
|
|
|
|
|
|
|
/* save r to the memory stack, and mark it as being free,
   if seen up to (vtop - n) stack entry */
ST_FUNC void save_reg_upstack(int r, int n)
{
    int l, size, align, bt;
    SValue *p, *p1, sv;

    /* only hardware registers need saving (VT_CONST and above are
       pseudo locations) */
    if ((r &= VT_VALMASK) >= VT_CONST)
        return;
    if (nocode_wanted)
        return;
    /* l: stack slot offset, 0 until the first store is emitted */
    l = 0;
    for(p = vstack, p1 = vtop - n; p <= p1; p++) {
        /* entry uses r either as its value register or as the
           second word of a two-word value */
        if ((p->r & VT_VALMASK) == r || p->r2 == r) {
            /* must save value on stack if not already done */
            if (!l) {
                bt = p->type.t & VT_BTYPE;
                if (bt == VT_VOID)
                    continue;
                /* for lvalues the register holds an address, and
                   function designators are stored as pointers */
                if ((p->r & VT_LVAL) || bt == VT_FUNC)
                    bt = VT_PTR;
                sv.type.t = bt;
                size = type_size(&sv.type, &align);
                l = get_temp_local_var(size,align);
                sv.r = VT_LOCAL | VT_LVAL;
                sv.c.i = l;
                store(p->r & VT_VALMASK, &sv);
#if defined(TCC_TARGET_I386) || defined(TCC_TARGET_X86_64)
                /* x86 specific: need to pop fp register ST0 if saved */
                if (r == TREG_ST0) {
                    o(0xd8dd); /* fstp %st(0) */
                }
#endif
                /* special long long case */
                if (p->r2 < VT_CONST && USING_TWO_WORDS(bt)) {
                    sv.c.i += PTR_SIZE;
                    store(p->r2, &sv);
                }
            }
            /* mark that stack entry as being saved on the stack */
            if (p->r & VT_LVAL) {
                /* also clear the bounded flag because the
                   relocation address of the function was stored in
                   p->c.i */
                p->r = (p->r & ~(VT_VALMASK | VT_BOUNDED)) | VT_LLOCAL;
            } else {
                p->r = VT_LVAL | VT_LOCAL;
            }
            p->sym = NULL;
            p->r2 = VT_CONST;
            p->c.i = l;
        }
    }
}
|
|
|
|
|
2009-12-20 08:53:49 +08:00
|
|
|
#ifdef TCC_TARGET_ARM
|
2009-05-06 02:18:10 +08:00
|
|
|
/* find a register of class 'rc2' with at most one reference on stack.
|
|
|
|
* If none, call get_reg(rc) */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC int get_reg_ex(int rc, int rc2)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
|
|
|
int r;
|
|
|
|
SValue *p;
|
2015-07-30 04:53:57 +08:00
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
for(r=0;r<NB_REGS;r++) {
|
|
|
|
if (reg_classes[r] & rc2) {
|
|
|
|
int n;
|
|
|
|
n=0;
|
2021-10-22 13:39:54 +08:00
|
|
|
for(p = vstack; p <= vtop; p++) {
|
2009-05-06 02:18:10 +08:00
|
|
|
if ((p->r & VT_VALMASK) == r ||
|
2019-12-17 01:48:31 +08:00
|
|
|
p->r2 == r)
|
2009-05-06 02:18:10 +08:00
|
|
|
n++;
|
|
|
|
}
|
|
|
|
if (n <= 1)
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
return get_reg(rc);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2009-12-20 08:53:49 +08:00
|
|
|
#endif
|
2009-05-06 02:18:10 +08:00
|
|
|
|
|
|
|
/* find a free register of class 'rc'. If none, save one register */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC int get_reg(int rc)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
|
|
|
int r;
|
|
|
|
SValue *p;
|
|
|
|
|
|
|
|
/* find a free register */
|
|
|
|
for(r=0;r<NB_REGS;r++) {
|
|
|
|
if (reg_classes[r] & rc) {
|
2021-10-22 13:39:54 +08:00
|
|
|
if (nocode_wanted)
|
2016-12-19 00:23:33 +08:00
|
|
|
return r;
|
2021-10-22 13:39:54 +08:00
|
|
|
for(p=vstack;p<=vtop;p++) {
|
2009-05-06 02:18:10 +08:00
|
|
|
if ((p->r & VT_VALMASK) == r ||
|
2019-12-17 01:48:31 +08:00
|
|
|
p->r2 == r)
|
2009-05-06 02:18:10 +08:00
|
|
|
goto notfound;
|
|
|
|
}
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
notfound: ;
|
|
|
|
}
|
2015-07-30 04:53:57 +08:00
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* no register left : free the first one on the stack (VERY
|
|
|
|
IMPORTANT to start from the bottom to ensure that we don't
|
|
|
|
spill registers used in gen_opi()) */
|
2021-10-22 13:39:54 +08:00
|
|
|
for(p=vstack;p<=vtop;p++) {
|
2012-07-12 05:39:05 +08:00
|
|
|
/* look at second register (if long long) */
|
2019-12-17 01:48:31 +08:00
|
|
|
r = p->r2;
|
2009-05-06 02:18:10 +08:00
|
|
|
if (r < VT_CONST && (reg_classes[r] & rc))
|
|
|
|
goto save_found;
|
2012-07-12 05:39:05 +08:00
|
|
|
r = p->r & VT_VALMASK;
|
2009-05-06 02:18:10 +08:00
|
|
|
if (r < VT_CONST && (reg_classes[r] & rc)) {
|
|
|
|
save_found:
|
2021-10-22 13:39:54 +08:00
|
|
|
save_reg(r);
|
2009-05-06 02:18:10 +08:00
|
|
|
return r;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* Should never comes here */
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-01-10 03:32:23 +08:00
|
|
|
/* find a free temporary local variable (return the offset on stack) match the size and align. If none, add new temporary stack variable*/
|
2021-10-22 13:39:54 +08:00
|
|
|
static int get_temp_local_var(int size,int align){
|
2019-01-10 03:32:23 +08:00
|
|
|
int i;
|
2021-10-22 13:39:54 +08:00
|
|
|
struct temp_local_variable *temp_var;
|
2019-01-10 03:32:23 +08:00
|
|
|
int found_var;
|
|
|
|
SValue *p;
|
|
|
|
int r;
|
|
|
|
char free;
|
|
|
|
char found;
|
|
|
|
found=0;
|
2021-10-22 13:39:54 +08:00
|
|
|
for(i=0;i<nb_temp_local_vars;i++){
|
|
|
|
temp_var=&arr_temp_local_vars[i];
|
2020-01-16 08:19:59 +08:00
|
|
|
if(temp_var->size<size||align!=temp_var->align){
|
2019-01-10 03:32:23 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/*check if temp_var is free*/
|
|
|
|
free=1;
|
2021-10-22 13:39:54 +08:00
|
|
|
for(p=vstack;p<=vtop;p++) {
|
2019-01-10 03:32:23 +08:00
|
|
|
r=p->r&VT_VALMASK;
|
|
|
|
if(r==VT_LOCAL||r==VT_LLOCAL){
|
|
|
|
if(p->c.i==temp_var->location){
|
|
|
|
free=0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if(free){
|
|
|
|
found_var=temp_var->location;
|
|
|
|
found=1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if(!found){
|
2021-10-22 13:39:54 +08:00
|
|
|
loc = (loc - size) & -align;
|
|
|
|
if(nb_temp_local_vars<MAX_TEMP_LOCAL_VARIABLE_NUMBER){
|
|
|
|
temp_var=&arr_temp_local_vars[i];
|
|
|
|
temp_var->location=loc;
|
2019-01-10 03:32:23 +08:00
|
|
|
temp_var->size=size;
|
|
|
|
temp_var->align=align;
|
2021-10-22 13:39:54 +08:00
|
|
|
nb_temp_local_vars++;
|
2019-01-10 03:32:23 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
found_var=loc;
|
2019-01-10 03:32:23 +08:00
|
|
|
}
|
|
|
|
return found_var;
|
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Forget all temporary local variable slots; called when the stack
   frame they were carved from is abandoned. */
static void clear_temp_local_var_list(){
	nb_temp_local_vars=0;
}
|
|
|
|
|
2013-04-20 01:31:24 +08:00
|
|
|
/* move register 's' (of type 't') to 'r', and flush previous value of r to memory
|
2009-05-06 02:18:10 +08:00
|
|
|
if needed */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void move_reg(int r, int s, int t)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
|
|
|
SValue sv;
|
|
|
|
|
|
|
|
if (r != s) {
|
2021-10-22 13:39:54 +08:00
|
|
|
save_reg(r);
|
2013-04-20 01:31:24 +08:00
|
|
|
sv.type.t = t;
|
|
|
|
sv.type.ref = NULL;
|
2009-05-06 02:18:10 +08:00
|
|
|
sv.r = s;
|
2015-11-18 03:09:35 +08:00
|
|
|
sv.c.i = 0;
|
2021-10-22 13:39:54 +08:00
|
|
|
load(r, &sv);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* get address of vtop (vtop MUST BE an lvalue) */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void gaddrof(void)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->r &= ~VT_LVAL;
|
2009-05-06 02:18:10 +08:00
|
|
|
/* tricky: if saved lvalue, then we can go back to lvalue */
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((vtop->r & VT_VALMASK) == VT_LLOCAL)
|
|
|
|
vtop->r = (vtop->r & ~VT_VALMASK) | VT_LOCAL | VT_LVAL;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_TCC_BCHECK
|
2020-07-12 16:55:40 +08:00
|
|
|
/* generate a bounded pointer addition */
static void gen_bounded_ptr_add(void)
{
    /* a VT_LOCAL base must be duplicated: the call clobbers the
       stack entries, but the caller still needs the original */
    int save = (vtop[-1].r & VT_VALMASK) == VT_LOCAL;
    if (save) {
      vpushv(&vtop[-1]);
      vrott(3);
    }
    /* call __bound_ptr_add(ptr, offset) */
    vpush_helper_func(TOK___bound_ptr_add);
    vrott(3);
    gfunc_call(2);
    /* drop the saved duplicate, if any */
    vtop -= save;
    vpushi(0);
    /* returned pointer is in REG_IRET */
    vtop->r = REG_IRET | VT_BOUNDED;
    if (nocode_wanted)
        return;
    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW_Rel));
}
|
|
|
|
|
|
|
|
/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
static void gen_bounded_ptr_deref(void)
{
    addr_t func;
    int size, align;
    ElfW_Rel *rel;
    Sym *sym;

    if (nocode_wanted)
        return;

    /* pick the indirection checker matching the access size */
    size = type_size(&vtop->type, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        /* may happen with struct member access */
        return;
    }
    sym = external_helper_sym(func);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);
    /* patch relocation: retarget the earlier __bound_ptr_add call
       (recorded in vtop->c.i) to the size-specific checker */
    /* XXX: find a better solution ? */
    rel = (ElfW_Rel *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELFW(R_INFO)(sym->c, ELFW(R_TYPE)(rel->r_info));
}
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* generate lvalue bound code */
static void gbound(void)
{
    CType type1;

    vtop->r &= ~VT_MUSTBOUND;
    /* if lvalue, then use checking code before dereferencing */
    if (vtop->r & VT_LVAL) {
        /* if not VT_BOUNDED value, then make one */
        if (!(vtop->r & VT_BOUNDED)) {
            /* must save type because we must set it to int to get pointer */
            type1 = vtop->type;
            vtop->type.t = VT_PTR;
            gaddrof();
            /* add offset 0 just to run the bound computation */
            vpushi(0);
            gen_bounded_ptr_add();
            vtop->r |= VT_LVAL;
            vtop->type = type1;
        }
        /* then check for dereferencing */
        gen_bounded_ptr_deref();
    }
}
|
bcheck cleanup
- revert Makefiles to state before last bcheck additions
Instead, just load bcheck.o explicitly if that is
what is wanted.
- move tcc_add_bcheck() to the <target>-link.c files and
remove revently added arguments. This function is to
support tccelf.c with linking, not for tccgen.c to
support compilation.
- remove -ba option: It said:
"-ba Enable better address checking with bounds checker"
Okay, if it is better then to have it is not an option.
- remove va_copy. It is C99 and we try to stay C89 in tinycc
when possible. For example, MS compilers do not have va_copy.
- win64: revert any 'fixes' to alloca
It was correct as it was before, except for bound_checking
where it was not implemented. This should now work too.
- remove parasitic filename:linenum features
Such feature is already present with rt_printline in
tccrun.c. If it doesn't work it can be fixed.
- revert changes to gen_bounded_ptr_add()
gen_bounded_ptr_add() was working as it should before
(mostly). For the sake of simplicity I switched it to
CDECL. Anyway, FASTCALL means SLOWCALL with tinycc.
In exchange you get one addition which is required for
bounds_cnecking function arguments. The important thing
is to check them *BEFORE* they are loaded into registers.
New function gbound_args() does that.
In any case, code instrumentation with the bounds-check
functions as such now seems to work flawlessly again,
which means when they are inserted as NOPs, any code that
tcc can compile, seems to behave just the same as without
them.
What these functions then do when fully enabled, is a
differnt story. I did not touch this.
2019-12-12 22:45:45 +08:00
|
|
|
|
|
|
|
/* we need to call __bound_ptr_add before we start to load function
   args into registers */
ST_FUNC void gbound_args(int nb_args)
{
    int i, v;
    SValue *sv;

    /* bound-check each argument in place, rotating it to the top of
       the value stack and back */
    for (i = 1; i <= nb_args; ++i)
        if (vtop[1 - i].r & VT_MUSTBOUND) {
            vrotb(i);
            gbound();
            vrott(i);
        }

    /* sv is the called function's stack entry */
    sv = vtop - nb_args;
    if (sv->r & VT_SYM) {
        v = sv->sym->v;
        /* setjmp-family calls need their jmp_buf registered with the
           bounds runtime */
        if (v == TOK_setjmp
          || v == TOK__setjmp
#ifndef TCC_TARGET_PE
          || v == TOK_sigsetjmp
          || v == TOK___sigsetjmp
#endif
          ) {
            vpush_helper_func(TOK___bound_setjmp);
            vpushv(sv + 1);
            gfunc_call(1);
            func_bound_add_epilog = 1;
        }
#if defined TCC_TARGET_I386 || defined TCC_TARGET_X86_64
        if (v == TOK_alloca)
            func_bound_add_epilog = 1;
#endif
#if TARGETOS_NetBSD
        if (v == TOK_longjmp) /* undo rename to __longjmp14 */
            sv->sym->asm_label = TOK___bound_longjmp;
#endif
    }
}
|
2020-01-15 15:53:19 +08:00
|
|
|
|
2020-01-16 08:19:59 +08:00
|
|
|
/* Add bounds for local symbols from S to E (via ->prev) */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void add_local_bounds(Sym *s, Sym *e)
|
2020-01-16 08:19:59 +08:00
|
|
|
{
|
|
|
|
for (; s != e; s = s->prev) {
|
|
|
|
if (!s->v || (s->r & VT_VALMASK) != VT_LOCAL)
|
|
|
|
continue;
|
|
|
|
/* Add arrays/structs/unions because we always take address */
|
|
|
|
if ((s->type.t & VT_ARRAY)
|
|
|
|
|| (s->type.t & VT_BTYPE) == VT_STRUCT
|
|
|
|
|| s->a.addrtaken) {
|
|
|
|
/* add local bound info */
|
|
|
|
int align, size = type_size(&s->type, &align);
|
2021-10-22 13:39:54 +08:00
|
|
|
addr_t *bounds_ptr = section_ptr_add(lbounds_section,
|
2020-01-16 08:19:59 +08:00
|
|
|
2 * sizeof(addr_t));
|
|
|
|
bounds_ptr[0] = s->c;
|
|
|
|
bounds_ptr[1] = size;
|
|
|
|
}
|
|
|
|
}
|
2020-01-15 15:53:19 +08:00
|
|
|
}
|
2020-01-16 08:19:59 +08:00
|
|
|
#endif
|
2020-01-15 15:53:19 +08:00
|
|
|
|
2020-01-16 08:19:59 +08:00
|
|
|
/* Wrapper around sym_pop, that potentially also registers local bounds. */
static void pop_local_syms(Sym *b, int keep)
{
#ifdef CONFIG_TCC_BCHECK
    /* with bounds checking, record addressable locals of the scope
       being closed (skipped when the syms are kept alive) */
    if (tcc_state->do_bounds_check && !keep && (local_scope || !func_var))
        add_local_bounds(local_stack, b);
#endif
    /* emit variable/scope debug info before the symbols go away */
    if (debug_modes)
        tcc_add_debug_info (tcc_state, !local_scope, local_stack, b);
    sym_pop(&local_stack, b, keep);
}
|
2009-05-06 02:18:10 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Advance the byte-lvalue on top of the value stack by 'o' bytes and
   leave it as an unsigned-char lvalue; used to walk a packed bitfield
   one byte at a time. */
static void incr_bf_adr(int o)
{
    vtop->type = char_pointer_type;
    gaddrof();
    vpushs(o);
    gen_op('+');
    vtop->type.t = VT_BYTE | VT_UNSIGNED;
    vtop->r |= VT_LVAL;
}
|
|
|
|
|
|
|
|
/* single-byte load mode for packed or otherwise unaligned bitfields */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* single-byte load mode for packed or otherwise unaligned bitfields.
   Reads the bitfield byte-by-byte, shifting/masking each byte into an
   accumulator, then sign-extends if the field type is signed.
   Stack comments use: B = byte lvalue, X = accumulator, Y = extracted bits. */
static void load_packed_bf(CType *type, int bit_pos, int bit_size)
{
    int n, o, bits;
    save_reg_upstack(vtop->r, 1);
    vpush64(type->t & VT_BTYPE, 0); // B X  (X starts as 0 accumulator)
    /* split bit_pos into a byte offset 'o' and a within-byte position */
    bits = 0, o = bit_pos >> 3, bit_pos &= 7;
    do {
        vswap(); // X B
        incr_bf_adr(o);
        vdup(); // X B B
        /* n = number of bits taken from the current byte */
        n = 8 - bit_pos;
        if (n > bit_size)
            n = bit_size;
        if (bit_pos)
            vpushi(bit_pos), gen_op(TOK_SHR), bit_pos = 0; // X B Y
        if (n < 8)
            vpushi((1 << n) - 1), gen_op('&');
        gen_cast(type);
        if (bits)
            vpushi(bits), gen_op(TOK_SHL);  /* position bits in accumulator */
        vrotb(3); // B Y X
        gen_op('|'); // B X
        bits += n, bit_size -= n, o = 1;    /* subsequent bytes are adjacent */
    } while (bit_size);
    vswap(), vpop();                        /* drop the byte address */
    if (!(type->t & VT_UNSIGNED)) {
        /* sign-extend: shift the field to the top then arithmetic-shift back */
        n = ((type->t & VT_BTYPE) == VT_LLONG ? 64 : 32) - bits;
        vpushi(n), gen_op(TOK_SHL);
        vpushi(n), gen_op(TOK_SAR);
    }
}
|
|
|
|
|
|
|
|
/* single-byte store mode for packed or otherwise unaligned bitfields */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* single-byte store mode for packed or otherwise unaligned bitfields.
   Writes the value byte-by-byte, merging each partial byte with the
   existing memory contents via mask/or.
   Stack comments use: X = value, B = byte lvalue, V = per-byte value. */
static void store_packed_bf(int bit_pos, int bit_size)
{
    int bits, n, o, m, c;
    /* c: value is a plain constant, so vdup() suffices instead of gv_dup() */
    c = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
    vswap(); // X B
    save_reg_upstack(vtop->r, 1);
    /* split bit_pos into byte offset 'o' and within-byte position */
    bits = 0, o = bit_pos >> 3, bit_pos &= 7;
    do {
        incr_bf_adr(o); // X B
        vswap(); //B X
        c ? vdup() : gv_dup(); // B V X
        vrott(3); // X B V
        if (bits)
            vpushi(bits), gen_op(TOK_SHR);   /* select the next chunk of X */
        if (bit_pos)
            vpushi(bit_pos), gen_op(TOK_SHL);
        /* n = number of bits written into the current byte */
        n = 8 - bit_pos;
        if (n > bit_size)
            n = bit_size;
        if (n < 8) {
            /* partial byte: keep untouched bits from memory */
            m = ((1 << n) - 1) << bit_pos;
            vpushi(m), gen_op('&'); // X B V1
            vpushv(vtop-1); // X B V1 B
            /* avoid sign-extension issues when the mask has bit 7 set */
            vpushi(m & 0x80 ? ~m & 0x7f : ~m);
            gen_op('&'); // X B V1 B1
            gen_op('|'); // X B V2
        }
        vdup(), vtop[-1] = vtop[-2]; // X B B V2
        vstore(), vpop(); // X B
        bits += n, bit_size -= n, bit_pos = 0, o = 1;
    } while (bit_size);
    vpop(), vpop();   /* drop byte address and original value */
}
|
|
|
|
|
2017-07-15 01:26:01 +08:00
|
|
|
/* Adjust an SValue that refers to a bitfield: if the field carries an
   auxiliary access type (auxtype), retype the SValue accordingly so it
   is loaded with that type.  Returns the auxtype (0 if no ref,
   -1/VT_STRUCT meaning "use the packed byte-wise path"). */
static int adjust_bf(SValue *sv, int bit_pos, int bit_size)
{
    int aux;
    if (sv->type.ref == NULL)
        return 0;
    aux = sv->type.ref->auxtype;
    if (aux == -1 || aux == VT_STRUCT)
        return aux;
    /* replace the basic type with the auxiliary access type */
    sv->type.t = (sv->type.t & ~(VT_BTYPE | VT_LONG)) | aux;
    sv->r |= VT_LVAL;
    return aux;
}
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* store vtop a register belonging to class 'rc'. lvalues are
|
|
|
|
converted to values. Cannot be used if cannot be converted to
|
|
|
|
register value (such as structures). */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Store vtop in a register belonging to class 'rc'.  Lvalues are
   converted to values.  Cannot be used if the value cannot be converted
   to a register (such as structures).  Returns the register used. */
ST_FUNC int gv(int rc)
{
    int r, r2, r_ok, r2_ok, rc2, bt;
    int bit_pos, bit_size, size, align;

    /* NOTE: get_reg can modify vstack[] */
    if (vtop->type.t & VT_BITFIELD) {
        /* bitfield: first extract the field value, then recurse */
        CType type;

        bit_pos = BIT_POS(vtop->type.t);
        bit_size = BIT_SIZE(vtop->type.t);
        /* remove bit field info to avoid loops */
        vtop->type.t &= ~VT_STRUCT_MASK;

        type.ref = NULL;
        type.t = vtop->type.t & VT_UNSIGNED;
        if ((vtop->type.t & VT_BTYPE) == VT_BOOL)
            type.t |= VT_UNSIGNED;   /* _Bool bitfields behave as unsigned */

        r = adjust_bf(vtop, bit_pos, bit_size);

        if ((vtop->type.t & VT_BTYPE) == VT_LLONG)
            type.t |= VT_LLONG;
        else
            type.t |= VT_INT;

        if (r == VT_STRUCT) {
            /* unaligned/packed field: byte-wise extraction */
            load_packed_bf(&type, bit_pos, bit_size);
        } else {
            int bits = (type.t & VT_BTYPE) == VT_LLONG ? 64 : 32;
            /* cast to int to propagate signedness in following ops */
            gen_cast(&type);
            /* generate shifts to isolate the field */
            vpushi(bits - (bit_pos + bit_size));
            gen_op(TOK_SHL);
            vpushi(bits - bit_size);
            /* NOTE: transformed to SHR if unsigned */
            gen_op(TOK_SAR);
        }
        r = gv(rc);
    } else {
        if (is_float(vtop->type.t) &&
            (vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
            /* CPUs usually cannot use float constants, so we store them
               generically in data segment */
            init_params p = { rodata_section };
            unsigned long offset;
            size = type_size(&vtop->type, &align);
            if (NODATA_WANTED)
                size = 0, align = 1;
            offset = section_add(p.sec, size, align);
            vpush_ref(&vtop->type, p.sec, offset, size);
            vswap();
            init_putv(&p, &vtop->type, offset);
            vtop->r |= VT_LVAL;
        }
#ifdef CONFIG_TCC_BCHECK
        if (vtop->r & VT_MUSTBOUND)
            gbound();
#endif

        bt = vtop->type.t & VT_BTYPE;

#ifdef TCC_TARGET_RISCV64
        /* XXX mega hack */
        if (bt == VT_LDOUBLE && rc == RC_FLOAT)
            rc = RC_INT;
#endif
        /* rc2 != 0 means the value needs a second register (register pair) */
        rc2 = RC2_TYPE(bt, rc);

        /* need to reload if:
           - constant
           - lvalue (need to dereference pointer)
           - already a register, but not in the right class */
        r = vtop->r & VT_VALMASK;
        r_ok = !(vtop->r & VT_LVAL) && (r < VT_CONST) && (reg_classes[r] & rc);
        r2_ok = !rc2 || ((vtop->r2 < VT_CONST) && (reg_classes[vtop->r2] & rc2));

        if (!r_ok || !r2_ok) {
            if (!r_ok)
                r = get_reg(rc);
            if (rc2) {
                int load_type = (bt == VT_QFLOAT) ? VT_DOUBLE : VT_PTRDIFF_T;
                int original_type = vtop->type.t;

                /* two register type load :
                   expand to two words temporarily */
                if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
                    /* load constant */
                    unsigned long long ll = vtop->c.i;
                    vtop->c.i = ll; /* first word */
                    load(r, vtop);
                    vtop->r = r; /* save register value */
                    vpushi(ll >> 32); /* second word */
                } else if (vtop->r & VT_LVAL) {
                    /* We do not want to modifier the long long pointer here.
                       So we save any other instances down the stack */
                    save_reg_upstack(vtop->r, 1);
                    /* load from memory */
                    vtop->type.t = load_type;
                    load(r, vtop);
                    vdup();
                    vtop[-1].r = r; /* save register value */
                    /* increment pointer to get second word */
                    vtop->type.t = VT_PTRDIFF_T;
                    gaddrof();
                    vpushs(PTR_SIZE);
                    gen_op('+');
                    vtop->r |= VT_LVAL;
                    vtop->type.t = load_type;
                } else {
                    /* move registers */
                    if (!r_ok)
                        load(r, vtop);
                    if (r2_ok && vtop->r2 < VT_CONST)
                        goto done;
                    vdup();
                    vtop[-1].r = r; /* save register value */
                    vtop->r = vtop[-1].r2;
                }
                /* Allocate second register. Here we rely on the fact that
                   get_reg() tries first to free r2 of an SValue. */
                r2 = get_reg(rc2);
                load(r2, vtop);
                vpop();
                /* write second register */
                vtop->r2 = r2;
            done:
                vtop->type.t = original_type;
            } else {
                if (vtop->r == VT_CMP)
                    vset_VT_JMP();   /* materialize pending comparison flags */
                /* one register type load */
                load(r, vtop);
            }
        }
        vtop->r = r;
#ifdef TCC_TARGET_C67
        /* uses register pairs for doubles */
        if (bt == VT_DOUBLE)
            vtop->r2 = r+1;
#endif
    }
    return r;
}
|
|
|
|
|
|
|
|
/* generate vtop[-1] and vtop[0] in resp. classes rc1 and rc2 */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void gv2(int rc1, int rc2)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
|
|
|
/* generate more generic register first. But VT_JMP or VT_CMP
|
|
|
|
values must be generated first in all cases to avoid possible
|
|
|
|
reload errors */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (vtop->r != VT_CMP && rc1 <= rc2) {
|
|
|
|
vswap();
|
|
|
|
gv(rc1);
|
|
|
|
vswap();
|
|
|
|
gv(rc2);
|
2009-05-06 02:18:10 +08:00
|
|
|
/* test if reload is needed for first register */
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((vtop[-1].r & VT_VALMASK) >= VT_CONST) {
|
|
|
|
vswap();
|
|
|
|
gv(rc1);
|
|
|
|
vswap();
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
gv(rc2);
|
|
|
|
vswap();
|
|
|
|
gv(rc1);
|
|
|
|
vswap();
|
2009-05-06 02:18:10 +08:00
|
|
|
/* test if reload is needed for first register */
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((vtop[0].r & VT_VALMASK) >= VT_CONST) {
|
|
|
|
gv(rc2);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-13 14:59:06 +08:00
|
|
|
#if PTR_SIZE == 4
|
2016-10-17 06:57:16 +08:00
|
|
|
/* expand 64bit on stack in two ints */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void lexpand(void)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
2016-10-17 01:04:40 +08:00
|
|
|
int u, v;
|
2021-10-22 13:39:54 +08:00
|
|
|
u = vtop->type.t & (VT_DEFSIGN | VT_UNSIGNED);
|
|
|
|
v = vtop->r & (VT_VALMASK | VT_LVAL);
|
2016-10-17 01:04:40 +08:00
|
|
|
if (v == VT_CONST) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vdup();
|
|
|
|
vtop[0].c.i >>= 32;
|
2016-10-17 01:04:40 +08:00
|
|
|
} else if (v == (VT_LVAL|VT_CONST) || v == (VT_LVAL|VT_LOCAL)) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vdup();
|
|
|
|
vtop[0].c.i += 4;
|
2016-10-17 01:04:40 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
gv(RC_INT);
|
|
|
|
vdup();
|
|
|
|
vtop[0].r = vtop[-1].r2;
|
|
|
|
vtop[0].r2 = vtop[-1].r2 = VT_CONST;
|
2016-10-17 01:04:40 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop[0].type.t = vtop[-1].type.t = VT_INT | u;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2016-12-06 07:29:25 +08:00
|
|
|
#endif
|
2009-05-06 02:18:10 +08:00
|
|
|
|
2017-05-13 14:59:06 +08:00
|
|
|
#if PTR_SIZE == 4
|
2009-05-06 02:18:10 +08:00
|
|
|
/* build a long long from two ints */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void lbuild(int t)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
gv2(RC_INT, RC_INT);
|
|
|
|
vtop[-1].r2 = vtop[0].r;
|
|
|
|
vtop[-1].type.t = t;
|
|
|
|
vpop();
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2016-10-17 06:57:16 +08:00
|
|
|
#endif
|
2009-05-06 02:18:10 +08:00
|
|
|
|
|
|
|
/* convert stack entry to register and duplicate its value in another
|
|
|
|
register */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* convert stack entry to register and duplicate its value in another
   register */
static void gv_dup(void)
{
    int t, rc, r;

    t = vtop->type.t;
#if PTR_SIZE == 4
    if ((t & VT_BTYPE) == VT_LLONG) {
        /* 32-bit target: duplicate a long long as two int halves */
        if (t & VT_BITFIELD) {
            gv(RC_INT);          /* extract the bitfield first */
            t = vtop->type.t;
        }
        lexpand();
        gv_dup();                /* duplicate low word */
        vswap();
        vrotb(3);
        gv_dup();                /* duplicate high word */
        vrotb(4);
        /* stack: H L L1 H1 */
        lbuild(t);               /* rebuild the duplicate */
        vrotb(3);
        vrotb(3);
        vswap();
        lbuild(t);               /* rebuild the original */
        vswap();
        return;
    }
#endif
    /* duplicate value */
    rc = RC_TYPE(t);
    gv(rc);
    r = get_reg(rc);
    vdup();
    load(r, vtop);
    vtop->r = r;
}
|
|
|
|
|
2017-05-13 14:59:06 +08:00
|
|
|
#if PTR_SIZE == 4
|
2009-05-06 02:18:10 +08:00
|
|
|
/* generate CPU independent (unsigned) long long operations */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* generate CPU independent (unsigned) long long operations on 32-bit
   targets: div/mod via helper calls, add/sub/mul/bitops via word-wise
   expansion, shifts either inline (constant count) or via helpers,
   and comparisons by comparing high then low words. */
static void gen_opl(int op)
{
    int t, a, b, op1, c, i;
    int func;
    unsigned short reg_iret = REG_IRET;
    unsigned short reg_lret = REG_IRE2;
    SValue tmp;

    switch(op) {
    case '/':
    case TOK_PDIV:
        func = TOK___divdi3;
        goto gen_func;
    case TOK_UDIV:
        func = TOK___udivdi3;
        goto gen_func;
    case '%':
        func = TOK___moddi3;
        goto gen_mod_func;
    case TOK_UMOD:
        func = TOK___umoddi3;
    gen_mod_func:
#ifdef TCC_ARM_EABI
        /* EABI aeabi_ldivmod returns the remainder in r2/r3 */
        reg_iret = TREG_R2;
        reg_lret = TREG_R3;
#endif
    gen_func:
        /* call generic long long function */
        vpush_helper_func(func);
        vrott(3);
        gfunc_call(2);
        vpushi(0);
        vtop->r = reg_iret;
        vtop->r2 = reg_lret;
        break;
    case '^':
    case '&':
    case '|':
    case '*':
    case '+':
    case '-':
        //pv("gen_opl A",0,2);
        t = vtop->type.t;
        vswap();
        lexpand();
        vrotb(3);
        lexpand();
        /* stack: L1 H1 L2 H2 */
        tmp = vtop[0];
        vtop[0] = vtop[-3];
        vtop[-3] = tmp;
        tmp = vtop[-2];
        vtop[-2] = vtop[-3];
        vtop[-3] = tmp;
        vswap();
        /* stack: H1 H2 L1 L2 */
        //pv("gen_opl B",0,4);
        if (op == '*') {
            /* 64x64 multiply: L1*L2 (full 64-bit) + (H1*L2 + H2*L1) << 32 */
            vpushv(vtop - 1);
            vpushv(vtop - 1);
            gen_op(TOK_UMULL);
            lexpand();
            /* stack: H1 H2 L1 L2 ML MH */
            for(i=0;i<4;i++)
                vrotb(6);
            /* stack: ML MH H1 H2 L1 L2 */
            tmp = vtop[0];
            vtop[0] = vtop[-2];
            vtop[-2] = tmp;
            /* stack: ML MH H1 L2 H2 L1 */
            gen_op('*');
            vrotb(3);
            vrotb(3);
            gen_op('*');
            /* stack: ML MH M1 M2 */
            gen_op('+');
            gen_op('+');
        } else if (op == '+' || op == '-') {
            /* XXX: add non carry method too (for MIPS or alpha) */
            if (op == '+')
                op1 = TOK_ADDC1;
            else
                op1 = TOK_SUBC1;
            gen_op(op1);
            /* stack: H1 H2 (L1 op L2) */
            vrotb(3);
            vrotb(3);
            gen_op(op1 + 1); /* TOK_xxxC2 */
        } else {
            /* bitwise ops act independently on each word */
            gen_op(op);
            /* stack: H1 H2 (L1 op L2) */
            vrotb(3);
            vrotb(3);
            /* stack: (L1 op L2) H1 H2 */
            gen_op(op);
            /* stack: (L1 op L2) (H1 op H2) */
        }
        /* stack: L H */
        lbuild(t);
        break;
    case TOK_SAR:
    case TOK_SHR:
    case TOK_SHL:
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            t = vtop[-1].type.t;
            vswap();
            lexpand();
            vrotb(3);
            /* stack: L H shift */
            c = (int)vtop->c.i;
            /* constant: simpler */
            /* NOTE: all comments are for SHL. the other cases are
               done by swapping words */
            vpop();
            if (op != TOK_SHL)
                vswap();
            if (c >= 32) {
                /* stack: L H */
                vpop();
                if (c > 32) {
                    vpushi(c - 32);
                    gen_op(op);
                }
                if (op != TOK_SAR) {
                    vpushi(0);
                } else {
                    /* SAR: high word becomes the sign extension */
                    gv_dup();
                    vpushi(31);
                    gen_op(TOK_SAR);
                }
                vswap();
            } else {
                vswap();
                gv_dup();
                /* stack: H L L */
                vpushi(c);
                gen_op(op);
                vswap();
                vpushi(32 - c);
                /* bits carried from one word into the other */
                if (op == TOK_SHL)
                    gen_op(TOK_SHR);
                else
                    gen_op(TOK_SHL);
                vrotb(3);
                /* stack: L L H */
                vpushi(c);
                if (op == TOK_SHL)
                    gen_op(TOK_SHL);
                else
                    gen_op(TOK_SHR);
                gen_op('|');
            }
            if (op != TOK_SHL)
                vswap();
            lbuild(t);
        } else {
            /* XXX: should provide a faster fallback on x86 ? */
            switch(op) {
            case TOK_SAR:
                func = TOK___ashrdi3;
                goto gen_func;
            case TOK_SHR:
                func = TOK___lshrdi3;
                goto gen_func;
            case TOK_SHL:
                func = TOK___ashldi3;
                goto gen_func;
            }
        }
        break;
    default:
        /* compare operations */
        t = vtop->type.t;
        vswap();
        lexpand();
        vrotb(3);
        lexpand();
        /* stack: L1 H1 L2 H2 */
        tmp = vtop[-1];
        vtop[-1] = vtop[-2];
        vtop[-2] = tmp;
        /* stack: L1 L2 H1 H2 */
        save_regs(4);
        /* compare high */
        op1 = op;
        /* when values are equal, we need to compare low words. since
           the jump is inverted, we invert the test too. */
        if (op1 == TOK_LT)
            op1 = TOK_LE;
        else if (op1 == TOK_GT)
            op1 = TOK_GE;
        else if (op1 == TOK_ULT)
            op1 = TOK_ULE;
        else if (op1 == TOK_UGT)
            op1 = TOK_UGE;
        a = 0;
        b = 0;
        gen_op(op1);
        if (op == TOK_NE) {
            b = gvtst(0, 0);
        } else {
            a = gvtst(1, 0);
            if (op != TOK_EQ) {
                /* generate non equal test */
                vpushi(0);
                vset_VT_CMP(TOK_NE);
                b = gvtst(0, 0);
            }
        }
        /* compare low. Always unsigned */
        op1 = op;
        if (op1 == TOK_LT)
            op1 = TOK_ULT;
        else if (op1 == TOK_LE)
            op1 = TOK_ULE;
        else if (op1 == TOK_GT)
            op1 = TOK_UGT;
        else if (op1 == TOK_GE)
            op1 = TOK_UGE;
        gen_op(op1);
#if 0//def TCC_TARGET_I386
        if (op == TOK_NE) { gsym(b); break; }
        if (op == TOK_EQ) { gsym(a); break; }
#endif
        gvtst_set(1, a);
        gvtst_set(0, b);
        break;
    }
}
|
|
|
|
#endif
|
|
|
|
|
2015-11-18 03:34:31 +08:00
|
|
|
/* Signed 64-bit division computed on unsigned operands: divide the
   magnitudes, then restore the sign from the operands' sign bits.
   Avoids signed-overflow UB (e.g. INT64_MIN / -1 wraps predictably). */
static uint64_t gen_opic_sdiv(uint64_t a, uint64_t b)
{
    uint64_t ua = (a >> 63) ? 0 - a : a;   /* |a| in unsigned arithmetic */
    uint64_t ub = (b >> 63) ? 0 - b : b;   /* |b| */
    uint64_t q = ua / ub;
    /* result is negative iff operand signs differ */
    return ((a ^ b) >> 63) ? 0 - q : q;
}
|
|
|
|
|
|
|
|
/* Signed less-than on uint64_t operands without signed-comparison UB:
   flipping the sign bit maps signed order onto unsigned order. */
static int gen_opic_lt(uint64_t a, uint64_t b)
{
    const uint64_t sign_bit = (uint64_t)1 << 63;
    return (a ^ sign_bit) < (b ^ sign_bit);
}
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* handle integer constant optimizations and various machine
|
|
|
|
independent opt */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* handle integer constant optimizations and various machine
   independent opt: fold const-const operations at compile time,
   normalize commutative operand order, strength-reduce mul/div by
   powers of two, fold symbol+constant, and otherwise fall through to
   the target code generator (gen_opi/gen_opl). */
static void gen_opic(int op)
{
    SValue *v1 = vtop - 1;
    SValue *v2 = vtop;
    int t1 = v1->type.t & VT_BTYPE;
    int t2 = v2->type.t & VT_BTYPE;
    int c1 = (v1->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
    int c2 = (v2->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
    int nonconst = (v1->r | v2->r) & VT_NONCONST;
    uint64_t l1 = c1 ? v1->c.i : 0;
    uint64_t l2 = c2 ? v2->c.i : 0;
    int shm = (t1 == VT_LLONG) ? 63 : 31;   /* shift-count mask per width */

    /* sign-/zero-extend 32-bit operands to canonical 64-bit form */
    if (t1 != VT_LLONG && (PTR_SIZE != 8 || t1 != VT_PTR))
        l1 = ((uint32_t)l1 |
              (v1->type.t & VT_UNSIGNED ? 0 : -(l1 & 0x80000000)));
    if (t2 != VT_LLONG && (PTR_SIZE != 8 || t2 != VT_PTR))
        l2 = ((uint32_t)l2 |
              (v2->type.t & VT_UNSIGNED ? 0 : -(l2 & 0x80000000)));

    if (c1 && c2) {
        /* both operands constant: fold at compile time */
        switch(op) {
        case '+': l1 += l2; break;
        case '-': l1 -= l2; break;
        case '&': l1 &= l2; break;
        case '^': l1 ^= l2; break;
        case '|': l1 |= l2; break;
        case '*': l1 *= l2; break;

        case TOK_PDIV:
        case '/':
        case '%':
        case TOK_UDIV:
        case TOK_UMOD:
            /* if division by zero, generate explicit division */
            if (l2 == 0) {
                if (const_wanted && !(nocode_wanted & unevalmask))
                    tcc_error("division by zero in constant");
                goto general_case;
            }
            switch(op) {
            default: l1 = gen_opic_sdiv(l1, l2); break;
            case '%': l1 = l1 - l2 * gen_opic_sdiv(l1, l2); break;
            case TOK_UDIV: l1 = l1 / l2; break;
            case TOK_UMOD: l1 = l1 % l2; break;
            }
            break;
        case TOK_SHL: l1 <<= (l2 & shm); break;
        case TOK_SHR: l1 >>= (l2 & shm); break;
        case TOK_SAR:
            /* arithmetic shift without signed-shift UB */
            l1 = (l1 >> 63) ? ~(~l1 >> (l2 & shm)) : l1 >> (l2 & shm);
            break;
        /* tests */
        case TOK_ULT: l1 = l1 < l2; break;
        case TOK_UGE: l1 = l1 >= l2; break;
        case TOK_EQ: l1 = l1 == l2; break;
        case TOK_NE: l1 = l1 != l2; break;
        case TOK_ULE: l1 = l1 <= l2; break;
        case TOK_UGT: l1 = l1 > l2; break;
        case TOK_LT: l1 = gen_opic_lt(l1, l2); break;
        case TOK_GE: l1 = !gen_opic_lt(l1, l2); break;
        case TOK_LE: l1 = !gen_opic_lt(l2, l1); break;
        case TOK_GT: l1 = gen_opic_lt(l2, l1); break;
        /* logical */
        case TOK_LAND: l1 = l1 && l2; break;
        case TOK_LOR: l1 = l1 || l2; break;
        default:
            goto general_case;
        }
        /* re-normalize the folded result to the operand width */
        if (t1 != VT_LLONG && (PTR_SIZE != 8 || t1 != VT_PTR))
            l1 = ((uint32_t)l1 |
                (v1->type.t & VT_UNSIGNED ? 0 : -(l1 & 0x80000000)));
        v1->c.i = l1;
        vtop--;
    } else {
        nonconst = VT_NONCONST;
        /* if commutative ops, put c2 as constant */
        if (c1 && (op == '+' || op == '&' || op == '^' ||
                   op == '|' || op == '*' || op == TOK_EQ || op == TOK_NE)) {
            vswap();
            c2 = c1; //c = c1, c1 = c2, c2 = c;
            l2 = l1; //l = l1, l1 = l2, l2 = l;
        }
        if (!const_wanted &&
            c1 && ((l1 == 0 &&
                    (op == TOK_SHL || op == TOK_SHR || op == TOK_SAR)) ||
                   (l1 == -1 && op == TOK_SAR))) {
            /* treat (0 << x), (0 >> x) and (-1 >> x) as constant */
            vtop--;
        } else if (!const_wanted &&
                   c2 && ((l2 == 0 && (op == '&' || op == '*')) ||
                          (op == '|' &&
                            (l2 == -1 || (l2 == 0xFFFFFFFF && t2 != VT_LLONG))) ||
                          (l2 == 1 && (op == '%' || op == TOK_UMOD)))) {
            /* treat (x & 0), (x * 0), (x | -1) and (x % 1) as constant */
            if (l2 == 1)
                vtop->c.i = 0;
            vswap();
            vtop--;
        } else if (c2 && (((op == '*' || op == '/' || op == TOK_UDIV ||
                          op == TOK_PDIV) &&
                           l2 == 1) ||
                          ((op == '+' || op == '-' || op == '|' || op == '^' ||
                            op == TOK_SHL || op == TOK_SHR || op == TOK_SAR) &&
                           l2 == 0) ||
                          (op == '&' &&
                            (l2 == -1 || (l2 == 0xFFFFFFFF && t2 != VT_LLONG))))) {
            /* filter out NOP operations like x*1, x-0, x&-1... */
            vtop--;
        } else if (c2 && (op == '*' || op == TOK_PDIV || op == TOK_UDIV)) {
            /* try to use shifts instead of muls or divs */
            if (l2 > 0 && (l2 & (l2 - 1)) == 0) {
                /* l2 is a power of two: compute log2(l2) */
                int n = -1;
                while (l2) {
                    l2 >>= 1;
                    n++;
                }
                vtop->c.i = n;
                if (op == '*')
                    op = TOK_SHL;
                else if (op == TOK_PDIV)
                    op = TOK_SAR;
                else
                    op = TOK_SHR;
            }
            goto general_case;
        } else if (c2 && (op == '+' || op == '-') &&
                   (((vtop[-1].r & (VT_VALMASK | VT_LVAL | VT_SYM)) == (VT_CONST | VT_SYM))
                    || (vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_LOCAL)) {
            /* symbol + constant case */
            if (op == '-')
                l2 = -l2;
            l2 += vtop[-1].c.i;
            /* The backends can't always deal with addends to symbols
               larger than +-1<<31.  Don't construct such.  */
            if ((int)l2 != l2)
                goto general_case;
            vtop--;
            vtop->c.i = l2;
        } else {
        general_case:
            /* call low level op generator */
            if (t1 == VT_LLONG || t2 == VT_LLONG ||
                (PTR_SIZE == 8 && (t1 == VT_PTR || t2 == VT_PTR)))
                gen_opl(op);
            else
                gen_opi(op);
        }
    }
    /* propagate the "not a true constant expression" marker */
    if (vtop->r == VT_CONST)
        vtop->r |= nonconst;
}
|
|
|
|
|
2021-01-04 20:16:05 +08:00
|
|
|
#if defined TCC_TARGET_X86_64 || defined TCC_TARGET_I386
|
|
|
|
# define gen_negf gen_opf
|
2021-01-26 23:51:20 +08:00
|
|
|
#elif defined TCC_TARGET_ARM
|
2021-10-22 13:39:54 +08:00
|
|
|
void gen_negf(int op)
|
2021-01-26 23:51:20 +08:00
|
|
|
{
|
|
|
|
/* arm will detect 0-x and replace by vneg */
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushi(0), vswap(), gen_op('-');
|
2021-01-26 23:51:20 +08:00
|
|
|
}
|
2021-01-04 20:16:05 +08:00
|
|
|
#else
|
|
|
|
/* XXX: implement in gen_opf() for other backends too */
|
2021-10-22 13:39:54 +08:00
|
|
|
void gen_negf(int op)
|
2021-01-04 20:16:05 +08:00
|
|
|
{
|
|
|
|
/* In IEEE negate(x) isn't subtract(0,x). Without NaNs it's
|
|
|
|
subtract(-0, x), but with them it's really a sign flip
|
|
|
|
operation. We implement this with bit manipulation and have
|
|
|
|
to do some type reinterpretation for this, which TCC can do
|
|
|
|
only via memory. */
|
|
|
|
|
|
|
|
int align, size, bt;
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
size = type_size(&vtop->type, &align);
|
|
|
|
bt = vtop->type.t & VT_BTYPE;
|
|
|
|
save_reg(gv(RC_TYPE(bt)));
|
|
|
|
vdup();
|
|
|
|
incr_bf_adr(size - 1);
|
|
|
|
vdup();
|
|
|
|
vpushi(0x80); /* flip sign */
|
|
|
|
gen_op('^');
|
|
|
|
vstore();
|
|
|
|
vpop();
|
2021-01-04 20:16:05 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* generate a floating point operation with constant propagation */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void gen_opif(int op)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
|
|
|
int c1, c2;
|
|
|
|
SValue *v1, *v2;
|
2018-06-08 21:31:40 +08:00
|
|
|
#if defined _MSC_VER && defined __x86_64__
|
2017-07-09 18:07:40 +08:00
|
|
|
/* avoid bad optimization with f1 -= f2 for f1:-0.0, f2:0.0 */
|
|
|
|
volatile
|
|
|
|
#endif
|
2009-05-06 02:18:10 +08:00
|
|
|
long double f1, f2;
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
v1 = vtop - 1;
|
|
|
|
v2 = vtop;
|
2021-01-04 20:16:05 +08:00
|
|
|
if (op == TOK_NEG)
|
|
|
|
v1 = v2;
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* currently, we cannot do computations with forward symbols */
|
|
|
|
c1 = (v1->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
|
|
|
|
c2 = (v2->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
|
|
|
|
if (c1 && c2) {
|
|
|
|
if (v1->type.t == VT_FLOAT) {
|
|
|
|
f1 = v1->c.f;
|
|
|
|
f2 = v2->c.f;
|
|
|
|
} else if (v1->type.t == VT_DOUBLE) {
|
|
|
|
f1 = v1->c.d;
|
|
|
|
f2 = v2->c.d;
|
|
|
|
} else {
|
|
|
|
f1 = v1->c.ld;
|
|
|
|
f2 = v2->c.ld;
|
|
|
|
}
|
|
|
|
/* NOTE: we only do constant propagation if finite number (not
|
|
|
|
NaN or infinity) (ANSI spec) */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (!(ieee_finite(f1) || !ieee_finite(f2)) && !const_wanted)
|
2009-05-06 02:18:10 +08:00
|
|
|
goto general_case;
|
|
|
|
switch(op) {
|
|
|
|
case '+': f1 += f2; break;
|
|
|
|
case '-': f1 -= f2; break;
|
|
|
|
case '*': f1 *= f2; break;
|
2015-07-30 04:53:57 +08:00
|
|
|
case '/':
|
2021-02-13 05:52:05 +08:00
|
|
|
if (f2 == 0.0) {
|
2021-01-04 20:16:05 +08:00
|
|
|
union { float f; unsigned u; } x1, x2, y;
|
2017-12-25 19:44:29 +08:00
|
|
|
/* If not in initializer we need to potentially generate
|
|
|
|
FP exceptions at runtime, otherwise we want to fold. */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (!const_wanted)
|
2017-12-25 19:44:29 +08:00
|
|
|
goto general_case;
|
2021-01-04 20:16:05 +08:00
|
|
|
/* the run-time result of 0.0/0.0 on x87, also of other compilers
|
|
|
|
when used to compile the f1 /= f2 below, would be -nan */
|
|
|
|
x1.f = f1, x2.f = f2;
|
2021-02-13 05:52:05 +08:00
|
|
|
if (f1 == 0.0)
|
2021-01-04 20:16:05 +08:00
|
|
|
y.u = 0x7fc00000; /* nan */
|
|
|
|
else
|
|
|
|
y.u = 0x7f800000; /* infinity */
|
|
|
|
y.u |= (x1.u ^ x2.u) & 0x80000000; /* set sign */
|
|
|
|
f1 = y.f;
|
|
|
|
break;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2021-01-04 20:16:05 +08:00
|
|
|
f1 /= f2;
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
2021-01-04 20:16:05 +08:00
|
|
|
case TOK_NEG:
|
|
|
|
f1 = -f1;
|
|
|
|
goto unary_result;
|
2009-05-06 02:18:10 +08:00
|
|
|
/* XXX: also handles tests ? */
|
|
|
|
default:
|
|
|
|
goto general_case;
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop--;
|
2021-01-04 20:16:05 +08:00
|
|
|
unary_result:
|
2009-05-06 02:18:10 +08:00
|
|
|
/* XXX: overflow test ? */
|
|
|
|
if (v1->type.t == VT_FLOAT) {
|
|
|
|
v1->c.f = f1;
|
|
|
|
} else if (v1->type.t == VT_DOUBLE) {
|
|
|
|
v1->c.d = f1;
|
|
|
|
} else {
|
|
|
|
v1->c.ld = f1;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
general_case:
|
2021-01-04 20:16:05 +08:00
|
|
|
if (op == TOK_NEG) {
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_negf(op);
|
2021-01-04 20:16:05 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_opf(op);
|
2021-01-04 20:16:05 +08:00
|
|
|
}
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-15 08:08:09 +08:00
|
|
|
/* print a type. If 'varstr' is not NULL, then the variable is also
|
|
|
|
printed in the type */
|
|
|
|
/* XXX: union */
|
|
|
|
/* XXX: add array and function pointers */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void type_to_str(char *buf, int buf_size,
|
2020-04-15 08:08:09 +08:00
|
|
|
CType *type, const char *varstr)
|
|
|
|
{
|
|
|
|
int bt, v, t;
|
|
|
|
Sym *s, *sa;
|
|
|
|
char buf1[256];
|
|
|
|
const char *tstr;
|
|
|
|
|
|
|
|
t = type->t;
|
|
|
|
bt = t & VT_BTYPE;
|
|
|
|
buf[0] = '\0';
|
|
|
|
|
|
|
|
if (t & VT_EXTERN)
|
|
|
|
pstrcat(buf, buf_size, "extern ");
|
|
|
|
if (t & VT_STATIC)
|
|
|
|
pstrcat(buf, buf_size, "static ");
|
|
|
|
if (t & VT_TYPEDEF)
|
|
|
|
pstrcat(buf, buf_size, "typedef ");
|
|
|
|
if (t & VT_INLINE)
|
|
|
|
pstrcat(buf, buf_size, "inline ");
|
2021-02-01 22:10:58 +08:00
|
|
|
if (bt != VT_PTR) {
|
|
|
|
if (t & VT_VOLATILE)
|
|
|
|
pstrcat(buf, buf_size, "volatile ");
|
|
|
|
if (t & VT_CONSTANT)
|
|
|
|
pstrcat(buf, buf_size, "const ");
|
|
|
|
}
|
2020-04-15 08:08:09 +08:00
|
|
|
if (((t & VT_DEFSIGN) && bt == VT_BYTE)
|
|
|
|
|| ((t & VT_UNSIGNED)
|
|
|
|
&& (bt == VT_SHORT || bt == VT_INT || bt == VT_LLONG)
|
|
|
|
&& !IS_ENUM(t)
|
|
|
|
))
|
|
|
|
pstrcat(buf, buf_size, (t & VT_UNSIGNED) ? "unsigned " : "signed ");
|
|
|
|
|
|
|
|
buf_size -= strlen(buf);
|
|
|
|
buf += strlen(buf);
|
|
|
|
|
|
|
|
switch(bt) {
|
|
|
|
case VT_VOID:
|
|
|
|
tstr = "void";
|
|
|
|
goto add_tstr;
|
|
|
|
case VT_BOOL:
|
|
|
|
tstr = "_Bool";
|
|
|
|
goto add_tstr;
|
|
|
|
case VT_BYTE:
|
|
|
|
tstr = "char";
|
|
|
|
goto add_tstr;
|
|
|
|
case VT_SHORT:
|
|
|
|
tstr = "short";
|
|
|
|
goto add_tstr;
|
|
|
|
case VT_INT:
|
|
|
|
tstr = "int";
|
|
|
|
goto maybe_long;
|
|
|
|
case VT_LLONG:
|
|
|
|
tstr = "long long";
|
|
|
|
maybe_long:
|
|
|
|
if (t & VT_LONG)
|
|
|
|
tstr = "long";
|
|
|
|
if (!IS_ENUM(t))
|
|
|
|
goto add_tstr;
|
|
|
|
tstr = "enum ";
|
|
|
|
goto tstruct;
|
|
|
|
case VT_FLOAT:
|
|
|
|
tstr = "float";
|
|
|
|
goto add_tstr;
|
|
|
|
case VT_DOUBLE:
|
|
|
|
tstr = "double";
|
|
|
|
if (!(t & VT_LONG))
|
|
|
|
goto add_tstr;
|
|
|
|
case VT_LDOUBLE:
|
|
|
|
tstr = "long double";
|
|
|
|
add_tstr:
|
|
|
|
pstrcat(buf, buf_size, tstr);
|
|
|
|
break;
|
|
|
|
case VT_STRUCT:
|
|
|
|
tstr = "struct ";
|
|
|
|
if (IS_UNION(t))
|
|
|
|
tstr = "union ";
|
|
|
|
tstruct:
|
|
|
|
pstrcat(buf, buf_size, tstr);
|
|
|
|
v = type->ref->v & ~SYM_STRUCT;
|
|
|
|
if (v >= SYM_FIRST_ANOM)
|
|
|
|
pstrcat(buf, buf_size, "<anonymous>");
|
|
|
|
else
|
2021-10-22 13:39:54 +08:00
|
|
|
pstrcat(buf, buf_size, get_tok_str(v, NULL));
|
2020-04-15 08:08:09 +08:00
|
|
|
break;
|
|
|
|
case VT_FUNC:
|
|
|
|
s = type->ref;
|
|
|
|
buf1[0]=0;
|
|
|
|
if (varstr && '*' == *varstr) {
|
|
|
|
pstrcat(buf1, sizeof(buf1), "(");
|
|
|
|
pstrcat(buf1, sizeof(buf1), varstr);
|
|
|
|
pstrcat(buf1, sizeof(buf1), ")");
|
|
|
|
}
|
|
|
|
pstrcat(buf1, buf_size, "(");
|
|
|
|
sa = s->next;
|
|
|
|
while (sa != NULL) {
|
|
|
|
char buf2[256];
|
2021-10-22 13:39:54 +08:00
|
|
|
type_to_str(buf2, sizeof(buf2), &sa->type, NULL);
|
2020-04-15 08:08:09 +08:00
|
|
|
pstrcat(buf1, sizeof(buf1), buf2);
|
|
|
|
sa = sa->next;
|
|
|
|
if (sa)
|
|
|
|
pstrcat(buf1, sizeof(buf1), ", ");
|
|
|
|
}
|
|
|
|
if (s->f.func_type == FUNC_ELLIPSIS)
|
|
|
|
pstrcat(buf1, sizeof(buf1), ", ...");
|
|
|
|
pstrcat(buf1, sizeof(buf1), ")");
|
2021-10-22 13:39:54 +08:00
|
|
|
type_to_str(buf, buf_size, &s->type, buf1);
|
2020-04-15 08:08:09 +08:00
|
|
|
goto no_var;
|
|
|
|
case VT_PTR:
|
|
|
|
s = type->ref;
|
2021-12-08 17:49:28 +08:00
|
|
|
if (t & (VT_ARRAY|VT_VLA)) {
|
2020-04-15 08:08:09 +08:00
|
|
|
if (varstr && '*' == *varstr)
|
|
|
|
snprintf(buf1, sizeof(buf1), "(%s)[%d]", varstr, s->c);
|
|
|
|
else
|
|
|
|
snprintf(buf1, sizeof(buf1), "%s[%d]", varstr ? varstr : "", s->c);
|
2021-10-22 13:39:54 +08:00
|
|
|
type_to_str(buf, buf_size, &s->type, buf1);
|
2020-04-15 08:08:09 +08:00
|
|
|
goto no_var;
|
|
|
|
}
|
|
|
|
pstrcpy(buf1, sizeof(buf1), "*");
|
|
|
|
if (t & VT_CONSTANT)
|
|
|
|
pstrcat(buf1, buf_size, "const ");
|
|
|
|
if (t & VT_VOLATILE)
|
|
|
|
pstrcat(buf1, buf_size, "volatile ");
|
|
|
|
if (varstr)
|
|
|
|
pstrcat(buf1, sizeof(buf1), varstr);
|
2021-10-22 13:39:54 +08:00
|
|
|
type_to_str(buf, buf_size, &s->type, buf1);
|
2020-04-15 08:08:09 +08:00
|
|
|
goto no_var;
|
|
|
|
}
|
|
|
|
if (varstr) {
|
|
|
|
pstrcat(buf, buf_size, " ");
|
|
|
|
pstrcat(buf, buf_size, varstr);
|
|
|
|
}
|
|
|
|
no_var: ;
|
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Report a fatal error for two incompatible types; 'fmt' must contain
   two '%s' placeholders which receive the printed forms of st and dt. */
static void type_incompatibility_error(CType* st, CType* dt, const char* fmt)
{
    char sbuf[256], dbuf[256];
    type_to_str(sbuf, sizeof(sbuf), st, NULL);
    type_to_str(dbuf, sizeof(dbuf), dt, NULL);
    tcc_error(fmt, sbuf, dbuf);
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Same as type_incompatibility_error() but only emits a warning. */
static void type_incompatibility_warning(CType* st, CType* dt, const char* fmt)
{
    char sbuf[256], dbuf[256];
    type_to_str(sbuf, sizeof(sbuf), st, NULL);
    type_to_str(dbuf, sizeof(dbuf), dt, NULL);
    tcc_warning(fmt, sbuf, dbuf);
}
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* size in bytes of the type 'type' points to (alignment is discarded) */
static int pointed_size(CType *type)
{
    int al;
    return type_size(pointed_type(type), &al);
}
|
|
|
|
|
|
|
|
/* return 1 if 'p' is a null pointer constant: an integer constant 0 or
   a plain (unqualified) '(void *)0' */
static inline int is_null_pointer(SValue *p)
{
    int bt;

    /* must be a pure compile-time constant, no symbol/lvalue involved */
    if ((p->r & (VT_VALMASK | VT_LVAL | VT_SYM | VT_NONCONST)) != VT_CONST)
        return 0;
    bt = p->type.t & VT_BTYPE;
    if (bt == VT_INT)
        return (uint32_t)p->c.i == 0;
    if (bt == VT_LLONG)
        return p->c.i == 0;
    if (bt == VT_PTR) {
        CType *pt = pointed_type(&p->type);
        return (PTR_SIZE == 4 ? (uint32_t)p->c.i == 0 : p->c.i == 0)
            && (pt->t & VT_BTYPE) == VT_VOID
            && 0 == (pt->t & (VT_CONSTANT | VT_VOLATILE));
    }
    return 0;
}
|
|
|
|
|
2020-01-18 09:36:29 +08:00
|
|
|
/* compare function types. OLD functions match any new functions */
|
|
|
|
static int is_compatible_func(CType *type1, CType *type2)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
2020-01-18 09:36:29 +08:00
|
|
|
Sym *s1, *s2;
|
|
|
|
|
|
|
|
s1 = type1->ref;
|
|
|
|
s2 = type2->ref;
|
|
|
|
if (s1->f.func_call != s2->f.func_call)
|
|
|
|
return 0;
|
|
|
|
if (s1->f.func_type != s2->f.func_type
|
|
|
|
&& s1->f.func_type != FUNC_OLD
|
|
|
|
&& s2->f.func_type != FUNC_OLD)
|
|
|
|
return 0;
|
|
|
|
for (;;) {
|
|
|
|
if (!is_compatible_unqualified_types(&s1->type, &s2->type))
|
|
|
|
return 0;
|
2020-11-22 14:02:09 +08:00
|
|
|
if (s1->f.func_type == FUNC_OLD || s2->f.func_type == FUNC_OLD )
|
|
|
|
return 1;
|
2020-01-18 09:36:29 +08:00
|
|
|
s1 = s1->next;
|
|
|
|
s2 = s2->next;
|
|
|
|
if (!s1)
|
|
|
|
return !s2;
|
|
|
|
if (!s2)
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* return true if type1 and type2 are the same. If unqualified is
|
|
|
|
true, qualifiers on the types are ignored.
|
|
|
|
*/
|
|
|
|
static int compare_types(CType *type1, CType *type2, int unqualified)
|
|
|
|
{
|
|
|
|
int bt1, t1, t2;
|
|
|
|
|
|
|
|
t1 = type1->t & VT_TYPE;
|
|
|
|
t2 = type2->t & VT_TYPE;
|
|
|
|
if (unqualified) {
|
|
|
|
/* strip qualifiers before comparing */
|
|
|
|
t1 &= ~(VT_CONSTANT | VT_VOLATILE);
|
|
|
|
t2 &= ~(VT_CONSTANT | VT_VOLATILE);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
|
2020-01-18 09:36:29 +08:00
|
|
|
/* Default Vs explicit signedness only matters for char */
|
|
|
|
if ((t1 & VT_BTYPE) != VT_BYTE) {
|
|
|
|
t1 &= ~VT_DEFSIGN;
|
|
|
|
t2 &= ~VT_DEFSIGN;
|
|
|
|
}
|
|
|
|
/* XXX: bitfields ? */
|
|
|
|
if (t1 != t2)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if ((t1 & VT_ARRAY)
|
|
|
|
&& !(type1->ref->c < 0
|
|
|
|
|| type2->ref->c < 0
|
|
|
|
|| type1->ref->c == type2->ref->c))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* test more complicated cases */
|
|
|
|
bt1 = t1 & VT_BTYPE;
|
2009-05-06 02:18:10 +08:00
|
|
|
if (bt1 == VT_PTR) {
|
|
|
|
type1 = pointed_type(type1);
|
|
|
|
type2 = pointed_type(type2);
|
2020-01-18 09:36:29 +08:00
|
|
|
return is_compatible_types(type1, type2);
|
|
|
|
} else if (bt1 == VT_STRUCT) {
|
|
|
|
return (type1->ref == type2->ref);
|
|
|
|
} else if (bt1 == VT_FUNC) {
|
|
|
|
return is_compatible_func(type1, type2);
|
2020-06-05 22:02:08 +08:00
|
|
|
} else if (IS_ENUM(type1->t) && IS_ENUM(type2->t)) {
|
|
|
|
/* If both are enums then they must be the same, if only one is then
|
|
|
|
t1 and t2 must be equal, which was checked above already. */
|
2020-01-18 09:36:29 +08:00
|
|
|
return type1->ref == type2->ref;
|
|
|
|
} else {
|
|
|
|
return 1;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2020-01-18 09:36:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Check if OP1 and OP2 can be "combined" with operation OP, the combined
|
|
|
|
type is stored in DEST if non-null (except for pointer plus/minus) . */
|
2021-10-22 13:39:54 +08:00
|
|
|
static int combine_types(CType *dest, SValue *op1, SValue *op2, int op)
|
2020-01-18 09:36:29 +08:00
|
|
|
{
|
|
|
|
CType *type1 = &op1->type, *type2 = &op2->type, type;
|
|
|
|
int t1 = type1->t, t2 = type2->t, bt1 = t1 & VT_BTYPE, bt2 = t2 & VT_BTYPE;
|
|
|
|
int ret = 1;
|
|
|
|
|
|
|
|
type.t = VT_VOID;
|
|
|
|
type.ref = NULL;
|
|
|
|
|
|
|
|
if (bt1 == VT_VOID || bt2 == VT_VOID) {
|
|
|
|
ret = op == '?' ? 1 : 0;
|
|
|
|
/* NOTE: as an extension, we accept void on only one side */
|
|
|
|
type.t = VT_VOID;
|
|
|
|
} else if (bt1 == VT_PTR || bt2 == VT_PTR) {
|
|
|
|
if (op == '+') ; /* Handled in caller */
|
|
|
|
/* http://port70.net/~nsz/c/c99/n1256.html#6.5.15p6 */
|
|
|
|
/* If one is a null ptr constant the result type is the other. */
|
|
|
|
else if (is_null_pointer (op2)) type = *type1;
|
|
|
|
else if (is_null_pointer (op1)) type = *type2;
|
|
|
|
else if (bt1 != bt2) {
|
|
|
|
/* accept comparison or cond-expr between pointer and integer
|
|
|
|
with a warning */
|
2020-06-18 00:08:09 +08:00
|
|
|
if ((op == '?' || TOK_ISCOND(op))
|
2020-01-18 09:36:29 +08:00
|
|
|
&& (is_integer_btype(bt1) || is_integer_btype(bt2)))
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_warning("pointer/integer mismatch in %s",
|
2020-01-18 09:36:29 +08:00
|
|
|
op == '?' ? "conditional expression" : "comparison");
|
|
|
|
else if (op != '-' || !is_integer_btype(bt2))
|
|
|
|
ret = 0;
|
|
|
|
type = *(bt1 == VT_PTR ? type1 : type2);
|
|
|
|
} else {
|
|
|
|
CType *pt1 = pointed_type(type1);
|
|
|
|
CType *pt2 = pointed_type(type2);
|
|
|
|
int pbt1 = pt1->t & VT_BTYPE;
|
|
|
|
int pbt2 = pt2->t & VT_BTYPE;
|
|
|
|
int newquals, copied = 0;
|
|
|
|
if (pbt1 != VT_VOID && pbt2 != VT_VOID
|
|
|
|
&& !compare_types(pt1, pt2, 1/*unqualif*/)) {
|
2020-06-18 00:08:09 +08:00
|
|
|
if (op != '?' && !TOK_ISCOND(op))
|
2020-01-18 09:36:29 +08:00
|
|
|
ret = 0;
|
|
|
|
else
|
2021-10-22 13:39:54 +08:00
|
|
|
type_incompatibility_warning(type1, type2,
|
2020-01-18 09:36:29 +08:00
|
|
|
op == '?'
|
|
|
|
? "pointer type mismatch in conditional expression ('%s' and '%s')"
|
|
|
|
: "pointer type mismatch in comparison('%s' and '%s')");
|
|
|
|
}
|
|
|
|
if (op == '?') {
|
|
|
|
/* pointers to void get preferred, otherwise the
|
|
|
|
pointed to types minus qualifs should be compatible */
|
|
|
|
type = *((pbt1 == VT_VOID) ? type1 : type2);
|
|
|
|
/* combine qualifs */
|
|
|
|
newquals = ((pt1->t | pt2->t) & (VT_CONSTANT | VT_VOLATILE));
|
|
|
|
if ((~pointed_type(&type)->t & (VT_CONSTANT | VT_VOLATILE))
|
|
|
|
& newquals)
|
|
|
|
{
|
|
|
|
/* copy the pointer target symbol */
|
2021-10-22 13:39:54 +08:00
|
|
|
type.ref = sym_push(SYM_FIELD, &type.ref->type,
|
2020-01-18 09:36:29 +08:00
|
|
|
0, type.ref->c);
|
|
|
|
copied = 1;
|
|
|
|
pointed_type(&type)->t |= newquals;
|
|
|
|
}
|
|
|
|
/* pointers to incomplete arrays get converted to
|
|
|
|
pointers to completed ones if possible */
|
|
|
|
if (pt1->t & VT_ARRAY
|
|
|
|
&& pt2->t & VT_ARRAY
|
|
|
|
&& pointed_type(&type)->ref->c < 0
|
|
|
|
&& (pt1->ref->c > 0 || pt2->ref->c > 0))
|
|
|
|
{
|
|
|
|
if (!copied)
|
2021-10-22 13:39:54 +08:00
|
|
|
type.ref = sym_push(SYM_FIELD, &type.ref->type,
|
2020-01-18 09:36:29 +08:00
|
|
|
0, type.ref->c);
|
|
|
|
pointed_type(&type)->ref =
|
2021-10-22 13:39:54 +08:00
|
|
|
sym_push(SYM_FIELD, &pointed_type(&type)->ref->type,
|
2020-01-18 09:36:29 +08:00
|
|
|
0, pointed_type(&type)->ref->c);
|
|
|
|
pointed_type(&type)->ref->c =
|
|
|
|
0 < pt1->ref->c ? pt1->ref->c : pt2->ref->c;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-06-18 00:08:09 +08:00
|
|
|
if (TOK_ISCOND(op))
|
2020-01-18 09:36:29 +08:00
|
|
|
type.t = VT_SIZE_T;
|
|
|
|
} else if (bt1 == VT_STRUCT || bt2 == VT_STRUCT) {
|
|
|
|
if (op != '?' || !compare_types(type1, type2, 1))
|
|
|
|
ret = 0;
|
|
|
|
type = *type1;
|
|
|
|
} else if (is_float(bt1) || is_float(bt2)) {
|
|
|
|
if (bt1 == VT_LDOUBLE || bt2 == VT_LDOUBLE) {
|
|
|
|
type.t = VT_LDOUBLE;
|
|
|
|
} else if (bt1 == VT_DOUBLE || bt2 == VT_DOUBLE) {
|
|
|
|
type.t = VT_DOUBLE;
|
|
|
|
} else {
|
|
|
|
type.t = VT_FLOAT;
|
|
|
|
}
|
|
|
|
} else if (bt1 == VT_LLONG || bt2 == VT_LLONG) {
|
|
|
|
/* cast to biggest op */
|
|
|
|
type.t = VT_LLONG | VT_LONG;
|
|
|
|
if (bt1 == VT_LLONG)
|
|
|
|
type.t &= t1;
|
|
|
|
if (bt2 == VT_LLONG)
|
|
|
|
type.t &= t2;
|
|
|
|
/* convert to unsigned if it does not fit in a long long */
|
|
|
|
if ((t1 & (VT_BTYPE | VT_UNSIGNED | VT_BITFIELD)) == (VT_LLONG | VT_UNSIGNED) ||
|
|
|
|
(t2 & (VT_BTYPE | VT_UNSIGNED | VT_BITFIELD)) == (VT_LLONG | VT_UNSIGNED))
|
|
|
|
type.t |= VT_UNSIGNED;
|
|
|
|
} else {
|
|
|
|
/* integer operations */
|
|
|
|
type.t = VT_INT | (VT_LONG & (t1 | t2));
|
|
|
|
/* convert to unsigned if it does not fit in an integer */
|
|
|
|
if ((t1 & (VT_BTYPE | VT_UNSIGNED | VT_BITFIELD)) == (VT_INT | VT_UNSIGNED) ||
|
|
|
|
(t2 & (VT_BTYPE | VT_UNSIGNED | VT_BITFIELD)) == (VT_INT | VT_UNSIGNED))
|
|
|
|
type.t |= VT_UNSIGNED;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2020-01-18 09:36:29 +08:00
|
|
|
if (dest)
|
|
|
|
*dest = type;
|
|
|
|
return ret;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* generic gen_op: handles types problems */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void gen_op(int op)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
2021-12-08 17:49:28 +08:00
|
|
|
int t1, t2, bt1, bt2, t;
|
2020-01-18 09:36:29 +08:00
|
|
|
CType type1, combtype;
|
2009-05-06 02:18:10 +08:00
|
|
|
|
2016-07-14 10:09:49 +08:00
|
|
|
redo:
|
2021-10-22 13:39:54 +08:00
|
|
|
t1 = vtop[-1].type.t;
|
|
|
|
t2 = vtop[0].type.t;
|
2009-05-06 02:18:10 +08:00
|
|
|
bt1 = t1 & VT_BTYPE;
|
|
|
|
bt2 = t2 & VT_BTYPE;
|
2015-07-30 04:53:57 +08:00
|
|
|
|
2020-01-18 09:36:29 +08:00
|
|
|
if (bt1 == VT_FUNC || bt2 == VT_FUNC) {
|
2016-07-14 10:09:49 +08:00
|
|
|
if (bt2 == VT_FUNC) {
|
2021-10-22 13:39:54 +08:00
|
|
|
mk_pointer(&vtop->type);
|
|
|
|
gaddrof();
|
2016-07-14 10:09:49 +08:00
|
|
|
}
|
|
|
|
if (bt1 == VT_FUNC) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vswap();
|
|
|
|
mk_pointer(&vtop->type);
|
|
|
|
gaddrof();
|
|
|
|
vswap();
|
2016-07-14 10:09:49 +08:00
|
|
|
}
|
|
|
|
goto redo;
|
2021-10-22 13:39:54 +08:00
|
|
|
} else if (!combine_types(&combtype, vtop - 1, vtop, op)) {
|
|
|
|
tcc_error_noabort("invalid operand types for binary operation");
|
|
|
|
vpop();
|
2016-05-12 07:12:04 +08:00
|
|
|
} else if (bt1 == VT_PTR || bt2 == VT_PTR) {
|
2009-05-06 02:18:10 +08:00
|
|
|
/* at least one operand is a pointer */
|
2017-05-08 12:38:09 +08:00
|
|
|
/* relational op: must be both pointers */
|
2021-12-08 17:49:28 +08:00
|
|
|
int align;
|
2020-06-18 00:08:09 +08:00
|
|
|
if (TOK_ISCOND(op))
|
2009-05-06 02:18:10 +08:00
|
|
|
goto std_op;
|
|
|
|
/* if both pointers, then it must be the '-' op */
|
|
|
|
if (bt1 == VT_PTR && bt2 == VT_PTR) {
|
|
|
|
if (op != '-')
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("cannot use pointers here");
|
2021-12-08 17:49:28 +08:00
|
|
|
vpush_type_size(pointed_type(&vtop[-1].type), &align);
|
2021-10-22 13:39:54 +08:00
|
|
|
vrott(3);
|
|
|
|
gen_opic(op);
|
|
|
|
vtop->type.t = VT_PTRDIFF_T;
|
|
|
|
vswap();
|
|
|
|
gen_op(TOK_PDIV);
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
|
|
|
/* exactly one pointer : must be '+' or '-'. */
|
|
|
|
if (op != '-' && op != '+')
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("cannot use pointers here");
|
2009-05-06 02:18:10 +08:00
|
|
|
/* Put pointer as first operand */
|
|
|
|
if (bt2 == VT_PTR) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vswap();
|
2017-02-09 02:45:31 +08:00
|
|
|
t = t1, t1 = t2, t2 = t;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2016-10-14 01:21:43 +08:00
|
|
|
#if PTR_SIZE == 4
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((vtop[0].type.t & VT_BTYPE) == VT_LLONG)
|
2016-10-14 01:21:43 +08:00
|
|
|
/* XXX: truncate here because gen_opl can't handle ptr + long long */
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_cast_s(VT_INT);
|
2016-10-14 01:21:43 +08:00
|
|
|
#endif
|
2021-10-22 13:39:54 +08:00
|
|
|
type1 = vtop[-1].type;
|
2021-12-08 17:49:28 +08:00
|
|
|
vpush_type_size(pointed_type(&vtop[-1].type), &align);
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_op('*');
|
2019-12-12 19:56:06 +08:00
|
|
|
#ifdef CONFIG_TCC_BCHECK
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tcc_state->do_bounds_check && !const_wanted) {
|
2009-05-06 02:18:10 +08:00
|
|
|
/* if bounded pointers, we generate a special code to
|
|
|
|
test bounds */
|
|
|
|
if (op == '-') {
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushi(0);
|
|
|
|
vswap();
|
|
|
|
gen_op('-');
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_bounded_ptr_add();
|
2009-05-06 02:18:10 +08:00
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_opic(op);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2021-12-08 17:49:28 +08:00
|
|
|
type1.t &= ~(VT_ARRAY|VT_VLA);
|
2009-05-06 02:18:10 +08:00
|
|
|
/* put again type if gen_opic() swaped operands */
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->type = type1;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2020-01-18 09:36:29 +08:00
|
|
|
} else {
|
2009-05-06 02:18:10 +08:00
|
|
|
/* floats can only be used for a few operations */
|
2020-01-18 09:36:29 +08:00
|
|
|
if (is_float(combtype.t)
|
|
|
|
&& op != '+' && op != '-' && op != '*' && op != '/'
|
2020-06-18 00:08:09 +08:00
|
|
|
&& !TOK_ISCOND(op))
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("invalid operands for binary operation");
|
2020-01-18 09:36:29 +08:00
|
|
|
else if (op == TOK_SHR || op == TOK_SAR || op == TOK_SHL) {
|
|
|
|
t = bt1 == VT_LLONG ? VT_LLONG : VT_INT;
|
|
|
|
if ((t1 & (VT_BTYPE | VT_UNSIGNED | VT_BITFIELD)) == (t | VT_UNSIGNED))
|
|
|
|
t |= VT_UNSIGNED;
|
|
|
|
t |= (VT_LONG & t1);
|
|
|
|
combtype.t = t;
|
|
|
|
}
|
2009-05-06 02:18:10 +08:00
|
|
|
std_op:
|
2020-01-18 09:36:29 +08:00
|
|
|
t = t2 = combtype.t;
|
2009-05-06 02:18:10 +08:00
|
|
|
/* XXX: currently, some unsigned operations are explicit, so
|
|
|
|
we modify them here */
|
|
|
|
if (t & VT_UNSIGNED) {
|
|
|
|
if (op == TOK_SAR)
|
|
|
|
op = TOK_SHR;
|
|
|
|
else if (op == '/')
|
|
|
|
op = TOK_UDIV;
|
|
|
|
else if (op == '%')
|
|
|
|
op = TOK_UMOD;
|
|
|
|
else if (op == TOK_LT)
|
|
|
|
op = TOK_ULT;
|
|
|
|
else if (op == TOK_GT)
|
|
|
|
op = TOK_UGT;
|
|
|
|
else if (op == TOK_LE)
|
|
|
|
op = TOK_ULE;
|
|
|
|
else if (op == TOK_GE)
|
|
|
|
op = TOK_UGE;
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
vswap();
|
|
|
|
gen_cast_s(t);
|
|
|
|
vswap();
|
2009-05-06 02:18:10 +08:00
|
|
|
/* special case for shifts and long long: we keep the shift as
|
|
|
|
an integer */
|
|
|
|
if (op == TOK_SHR || op == TOK_SAR || op == TOK_SHL)
|
2020-01-18 09:36:29 +08:00
|
|
|
t2 = VT_INT;
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_cast_s(t2);
|
2009-05-06 02:18:10 +08:00
|
|
|
if (is_float(t))
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_opif(op);
|
2009-05-06 02:18:10 +08:00
|
|
|
else
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_opic(op);
|
2020-06-18 00:08:09 +08:00
|
|
|
if (TOK_ISCOND(op)) {
|
2017-05-08 12:38:09 +08:00
|
|
|
/* relational op: the result is an int */
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->type.t = VT_INT;
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->type.t = t;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
}
|
2015-02-21 06:18:41 +08:00
|
|
|
// Make sure that we have converted to an rvalue:
|
2021-10-22 13:39:54 +08:00
|
|
|
if (vtop->r & VT_LVAL)
|
|
|
|
gv(is_float(vtop->type.t & VT_BTYPE) ? RC_FLOAT : RC_INT);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
|
2019-12-17 01:44:35 +08:00
|
|
|
#if defined TCC_TARGET_ARM64 || defined TCC_TARGET_RISCV64 || defined TCC_TARGET_ARM
|
|
|
|
#define gen_cvt_itof1 gen_cvt_itof
|
|
|
|
#else
|
2009-05-06 02:18:10 +08:00
|
|
|
/* generic itof for unsigned long long case */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void gen_cvt_itof1(int t)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
|
2009-05-06 02:18:10 +08:00
|
|
|
(VT_LLONG | VT_UNSIGNED)) {
|
|
|
|
|
|
|
|
if (t == VT_FLOAT)
|
2021-10-22 13:39:54 +08:00
|
|
|
vpush_helper_func(TOK___floatundisf);
|
2009-05-06 02:18:10 +08:00
|
|
|
#if LDOUBLE_SIZE != 8
|
|
|
|
else if (t == VT_LDOUBLE)
|
2021-10-22 13:39:54 +08:00
|
|
|
vpush_helper_func(TOK___floatundixf);
|
2009-05-06 02:18:10 +08:00
|
|
|
#endif
|
|
|
|
else
|
2021-10-22 13:39:54 +08:00
|
|
|
vpush_helper_func(TOK___floatundidf);
|
|
|
|
vrott(2);
|
|
|
|
gfunc_call(1);
|
|
|
|
vpushi(0);
|
|
|
|
PUT_R_RET(vtop, t);
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_cvt_itof(t);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2019-12-17 01:44:35 +08:00
|
|
|
#if defined TCC_TARGET_ARM64 || defined TCC_TARGET_RISCV64
|
|
|
|
#define gen_cvt_ftoi1 gen_cvt_ftoi
|
|
|
|
#else
|
2009-05-06 02:18:10 +08:00
|
|
|
/* generic ftoi for unsigned long long case */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void gen_cvt_ftoi1(int t)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
|
|
|
int st;
|
|
|
|
if (t == (VT_LLONG | VT_UNSIGNED)) {
|
|
|
|
/* not handled natively */
|
2021-10-22 13:39:54 +08:00
|
|
|
st = vtop->type.t & VT_BTYPE;
|
2009-05-06 02:18:10 +08:00
|
|
|
if (st == VT_FLOAT)
|
2021-10-22 13:39:54 +08:00
|
|
|
vpush_helper_func(TOK___fixunssfdi);
|
2009-05-06 02:18:10 +08:00
|
|
|
#if LDOUBLE_SIZE != 8
|
|
|
|
else if (st == VT_LDOUBLE)
|
2021-10-22 13:39:54 +08:00
|
|
|
vpush_helper_func(TOK___fixunsxfdi);
|
2009-05-06 02:18:10 +08:00
|
|
|
#endif
|
|
|
|
else
|
2021-10-22 13:39:54 +08:00
|
|
|
vpush_helper_func(TOK___fixunsdfdi);
|
|
|
|
vrott(2);
|
|
|
|
gfunc_call(1);
|
|
|
|
vpushi(0);
|
|
|
|
PUT_R_RET(vtop, t);
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_cvt_ftoi(t);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
}
|
2019-12-17 01:44:35 +08:00
|
|
|
#endif
|
2009-05-06 02:18:10 +08:00
|
|
|
|
2019-12-17 01:51:28 +08:00
|
|
|
/* special delayed cast for char/short */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void force_charshort_cast(void)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
int sbt = BFGET(vtop->r, VT_MUSTCAST) == 2 ? VT_LLONG : VT_INT;
|
|
|
|
int dbt = vtop->type.t;
|
|
|
|
vtop->r &= ~VT_MUSTCAST;
|
|
|
|
vtop->type.t = sbt;
|
|
|
|
gen_cast_s(dbt == VT_BOOL ? VT_BYTE|VT_UNSIGNED : dbt);
|
|
|
|
vtop->type.t = dbt;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void gen_cast_s(int t)
|
2017-07-09 18:34:11 +08:00
|
|
|
{
|
|
|
|
CType type;
|
|
|
|
type.t = t;
|
|
|
|
type.ref = NULL;
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_cast(&type);
|
2017-07-09 18:34:11 +08:00
|
|
|
}
|
|
|
|
|
2019-12-17 01:51:28 +08:00
|
|
|
/* cast 'vtop' to 'type'. Casting to bitfields is forbidden. */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void gen_cast(CType *type)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
2019-12-17 01:51:28 +08:00
|
|
|
int sbt, dbt, sf, df, c;
|
|
|
|
int dbt_bt, sbt_bt, ds, ss, bits, trunc;
|
2009-05-06 02:18:10 +08:00
|
|
|
|
|
|
|
/* special delayed cast for char/short */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (vtop->r & VT_MUSTCAST)
|
|
|
|
force_charshort_cast();
|
2009-05-06 02:18:10 +08:00
|
|
|
|
|
|
|
/* bitfields first get cast to ints */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (vtop->type.t & VT_BITFIELD)
|
|
|
|
gv(RC_INT);
|
2009-05-06 02:18:10 +08:00
|
|
|
|
|
|
|
dbt = type->t & (VT_BTYPE | VT_UNSIGNED);
|
2021-10-22 13:39:54 +08:00
|
|
|
sbt = vtop->type.t & (VT_BTYPE | VT_UNSIGNED);
|
2019-12-17 01:51:28 +08:00
|
|
|
if (sbt == VT_FUNC)
|
|
|
|
sbt = VT_PTR;
|
2009-05-06 02:18:10 +08:00
|
|
|
|
2019-12-17 01:51:28 +08:00
|
|
|
again:
|
2009-05-06 02:18:10 +08:00
|
|
|
if (sbt != dbt) {
|
|
|
|
sf = is_float(sbt);
|
|
|
|
df = is_float(dbt);
|
2019-12-17 01:51:28 +08:00
|
|
|
dbt_bt = dbt & VT_BTYPE;
|
|
|
|
sbt_bt = sbt & VT_BTYPE;
|
2021-01-27 05:25:53 +08:00
|
|
|
if (dbt_bt == VT_VOID)
|
|
|
|
goto done;
|
|
|
|
if (sbt_bt == VT_VOID) {
|
|
|
|
error:
|
2021-10-22 13:39:54 +08:00
|
|
|
cast_error(&vtop->type, type);
|
2021-01-27 05:25:53 +08:00
|
|
|
}
|
2019-12-17 01:51:28 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
c = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
|
various stuff
win32/Makefile ("for cygwin") removed
- On cygwin, the normal ./configure && make can be used with either
cygwin's "GCC for Win32 Toolchain"
./configure --cross-prefix=i686-w64-mingw32-
or with an existing tcc:
./configure --cc=<old-tccdir>/tcc.exe
tcctest.c:
- exclude test_high_clobbers() on _WIN64 (does not work)
tests2/95_bitfield.c:
- use 'signed char' for ARM (where default 'char' is unsigned)
tests:
- remove -I "expr" diff option to allow tests with
busybox-diff.
libtcc.c, tcc.c:
- removed -iwithprefix option. It is supposed to be
combined with -iprefix which we don't have either.
tccgen.c:
- fix assignments and return of 'void', as in
void f() {
void *p, *q;
*p = *q:
return *p;
}
This appears to be allowed but should do nothing.
tcc.h, libtcc.c, tccpp.c:
- Revert "Introduce VIP sysinclude paths which are always searched first"
This reverts commit 1d5e386b0a78393ac6b670c209a185849ec798a1.
The patch was giving tcc's system includes priority over -I which
is not how it should be.
tccelf.c:
- add DT_TEXTREL tag only if text relocations are actually
used (which is likely not the case on x86_64)
- prepare_dynamic_rel(): avoid relocation of unresolved
(weak) symbols
tccrun.c:
- for HAVE_SELINUX, use two mappings to the same (real) file.
(it was so once except the RX mapping wasn't used at all).
tccpe.c:
- fix relocation constant used for x86_64 (by Andrei E. Warentin)
- #ifndef _WIN32 do "chmod 755 ..." to get runnable exes on cygwin.
tccasm.c:
- keep forward asm labels static, otherwise they will endup
in dynsym eventually.
configure, Makefile:
- mingw32: respect ./configure options --bindir --docdir --libdir
- allow overriding tcc when building libtcc1.a and libtcc.def with
make XTCC=<tcc program to use>
- use $(wildcard ...) for install to allow installing just
a cross compiler for example
make cross-arm
make install
- use name <target>-libtcc1.a
build-tcc.bat:
- add options: -clean, -b bindir
2017-10-12 00:13:43 +08:00
|
|
|
#if !defined TCC_IS_NATIVE && !defined TCC_IS_NATIVE_387
|
2021-10-22 13:39:54 +08:00
|
|
|
c &= (dbt != VT_LDOUBLE) | !!nocode_wanted;
|
various stuff
win32/Makefile ("for cygwin") removed
- On cygwin, the normal ./configure && make can be used with either
cygwin's "GCC for Win32 Toolchain"
./configure --cross-prefix=i686-w64-mingw32-
or with an existing tcc:
./configure --cc=<old-tccdir>/tcc.exe
tcctest.c:
- exclude test_high_clobbers() on _WIN64 (does not work)
tests2/95_bitfield.c:
- use 'signed char' for ARM (where default 'char' is unsigned)
tests:
- remove -I "expr" diff option to allow tests with
busybox-diff.
libtcc.c, tcc.c:
- removed -iwithprefix option. It is supposed to be
combined with -iprefix which we don't have either.
tccgen.c:
- fix assignments and return of 'void', as in
void f() {
void *p, *q;
*p = *q:
return *p;
}
This appears to be allowed but should do nothing.
tcc.h, libtcc.c, tccpp.c:
- Revert "Introduce VIP sysinclude paths which are always searched first"
This reverts commit 1d5e386b0a78393ac6b670c209a185849ec798a1.
The patch was giving tcc's system includes priority over -I which
is not how it should be.
tccelf.c:
- add DT_TEXTREL tag only if text relocations are actually
used (which is likely not the case on x86_64)
- prepare_dynamic_rel(): avoid relocation of unresolved
(weak) symbols
tccrun.c:
- for HAVE_SELINUX, use two mappings to the same (real) file.
(it was so once except the RX mapping wasn't used at all).
tccpe.c:
- fix relocation constant used for x86_64 (by Andrei E. Warentin)
- #ifndef _WIN32 do "chmod 755 ..." to get runnable exes on cygwin.
tccasm.c:
- keep forward asm labels static, otherwise they will endup
in dynsym eventually.
configure, Makefile:
- mingw32: respect ./configure options --bindir --docdir --libdir
- allow overriding tcc when building libtcc1.a and libtcc.def with
make XTCC=<tcc program to use>
- use $(wildcard ...) for install to allow installing just
a cross compiler for example
make cross-arm
make install
- use name <target>-libtcc1.a
build-tcc.bat:
- add options: -clean, -b bindir
2017-10-12 00:13:43 +08:00
|
|
|
#endif
|
2009-05-06 02:18:10 +08:00
|
|
|
if (c) {
|
|
|
|
/* constant case: we can do it now */
|
|
|
|
/* XXX: in ISOC, cannot do it if error in convert */
|
|
|
|
if (sbt == VT_FLOAT)
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.ld = vtop->c.f;
|
2009-05-06 02:18:10 +08:00
|
|
|
else if (sbt == VT_DOUBLE)
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.ld = vtop->c.d;
|
2009-05-06 02:18:10 +08:00
|
|
|
|
|
|
|
if (df) {
|
2019-12-17 01:51:28 +08:00
|
|
|
if (sbt_bt == VT_LLONG) {
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((sbt & VT_UNSIGNED) || !(vtop->c.i >> 63))
|
|
|
|
vtop->c.ld = vtop->c.i;
|
2009-05-06 02:18:10 +08:00
|
|
|
else
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.ld = -(long double)-vtop->c.i;
|
2009-05-06 02:18:10 +08:00
|
|
|
} else if(!sf) {
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((sbt & VT_UNSIGNED) || !(vtop->c.i >> 31))
|
|
|
|
vtop->c.ld = (uint32_t)vtop->c.i;
|
2009-05-06 02:18:10 +08:00
|
|
|
else
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.ld = -(long double)-(uint32_t)vtop->c.i;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (dbt == VT_FLOAT)
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.f = (float)vtop->c.ld;
|
2009-05-06 02:18:10 +08:00
|
|
|
else if (dbt == VT_DOUBLE)
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.d = (double)vtop->c.ld;
|
2009-05-06 02:18:10 +08:00
|
|
|
} else if (sf && dbt == VT_BOOL) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.i = (vtop->c.ld != 0);
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
|
|
|
if(sf)
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.i = vtop->c.ld;
|
2019-12-17 01:51:28 +08:00
|
|
|
else if (sbt_bt == VT_LLONG || (PTR_SIZE == 8 && sbt == VT_PTR))
|
2015-11-18 03:09:35 +08:00
|
|
|
;
|
2009-05-06 02:18:10 +08:00
|
|
|
else if (sbt & VT_UNSIGNED)
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.i = (uint32_t)vtop->c.i;
|
2019-12-17 01:51:28 +08:00
|
|
|
else
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.i = ((uint32_t)vtop->c.i | -(vtop->c.i & 0x80000000));
|
2009-05-06 02:18:10 +08:00
|
|
|
|
2019-12-17 01:51:28 +08:00
|
|
|
if (dbt_bt == VT_LLONG || (PTR_SIZE == 8 && dbt == VT_PTR))
|
2015-11-18 03:09:35 +08:00
|
|
|
;
|
2009-05-06 02:18:10 +08:00
|
|
|
else if (dbt == VT_BOOL)
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.i = (vtop->c.i != 0);
|
2019-12-17 01:51:28 +08:00
|
|
|
else {
|
|
|
|
uint32_t m = dbt_bt == VT_BYTE ? 0xff :
|
|
|
|
dbt_bt == VT_SHORT ? 0xffff :
|
|
|
|
0xffffffff;
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.i &= m;
|
2015-11-18 03:09:35 +08:00
|
|
|
if (!(dbt & VT_UNSIGNED))
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.i |= -(vtop->c.i & ((m >> 1) + 1));
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
}
|
2019-12-17 01:51:28 +08:00
|
|
|
goto done;
|
|
|
|
|
|
|
|
} else if (dbt == VT_BOOL
|
2021-10-22 13:39:54 +08:00
|
|
|
&& (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM))
|
2019-12-17 01:51:28 +08:00
|
|
|
== (VT_CONST | VT_SYM)) {
|
|
|
|
/* addresses are considered non-zero (see tcctest.c:sinit23) */
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->r = VT_CONST;
|
|
|
|
vtop->c.i = 1;
|
2019-12-17 01:51:28 +08:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* cannot generate code for global or static initializers */
|
2022-08-20 18:58:56 +08:00
|
|
|
if (nocode_wanted & DATA_ONLY_WANTED)
|
2019-12-17 01:51:28 +08:00
|
|
|
goto done;
|
|
|
|
|
|
|
|
/* non constant case: generate code */
|
|
|
|
if (dbt == VT_BOOL) {
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_test_zero(TOK_NE);
|
2019-12-17 01:51:28 +08:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sf || df) {
|
2009-05-06 02:18:10 +08:00
|
|
|
if (sf && df) {
|
|
|
|
/* convert from fp to fp */
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_cvt_ftof(dbt);
|
2009-05-06 02:18:10 +08:00
|
|
|
} else if (df) {
|
|
|
|
/* convert int to fp */
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_cvt_itof1(dbt);
|
2019-12-17 01:51:28 +08:00
|
|
|
} else {
|
2009-05-06 02:18:10 +08:00
|
|
|
/* convert fp to int */
|
2019-12-17 01:51:28 +08:00
|
|
|
sbt = dbt;
|
|
|
|
if (dbt_bt != VT_LLONG && dbt_bt != VT_INT)
|
|
|
|
sbt = VT_INT;
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_cvt_ftoi1(sbt);
|
2019-12-17 01:51:28 +08:00
|
|
|
goto again; /* may need char/short cast */
|
|
|
|
}
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
ds = btype_size(dbt_bt);
|
|
|
|
ss = btype_size(sbt_bt);
|
2021-01-27 05:25:53 +08:00
|
|
|
if (ds == 0 || ss == 0)
|
|
|
|
goto error;
|
|
|
|
|
2019-12-17 01:51:28 +08:00
|
|
|
if (IS_ENUM(type->t) && type->ref->c < 0)
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("cast to incomplete type");
|
2019-12-17 01:51:28 +08:00
|
|
|
|
|
|
|
/* same size and no sign conversion needed */
|
|
|
|
if (ds == ss && ds >= 4)
|
|
|
|
goto done;
|
|
|
|
if (dbt_bt == VT_PTR || sbt_bt == VT_PTR) {
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_warning("cast between pointer and integer of different size");
|
2019-12-17 01:51:28 +08:00
|
|
|
if (sbt_bt == VT_PTR) {
|
|
|
|
/* put integer type to allow logical operations below */
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->type.t = (PTR_SIZE == 8 ? VT_LLONG : VT_INT);
|
2019-12-17 01:51:28 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* processor allows { int a = 0, b = *(char*)&a; }
|
|
|
|
That means that if we cast to less width, we can just
|
|
|
|
change the type and read it still later. */
|
|
|
|
#define ALLOW_SUBTYPE_ACCESS 1
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
if (ALLOW_SUBTYPE_ACCESS && (vtop->r & VT_LVAL)) {
|
2019-12-17 01:51:28 +08:00
|
|
|
/* value still in memory */
|
|
|
|
if (ds <= ss)
|
|
|
|
goto done;
|
|
|
|
/* ss <= 4 here */
|
2019-09-12 23:45:18 +08:00
|
|
|
if (ds <= 4 && !(dbt == (VT_SHORT | VT_UNSIGNED) && sbt == VT_BYTE)) {
|
2021-10-22 13:39:54 +08:00
|
|
|
gv(RC_INT);
|
2019-12-17 01:51:28 +08:00
|
|
|
goto done; /* no 64bit envolved */
|
|
|
|
}
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
gv(RC_INT);
|
2019-12-17 01:51:28 +08:00
|
|
|
|
|
|
|
trunc = 0;
|
2017-05-13 14:59:06 +08:00
|
|
|
#if PTR_SIZE == 4
|
2019-12-17 01:51:28 +08:00
|
|
|
if (ds == 8) {
|
|
|
|
/* generate high word */
|
|
|
|
if (sbt & VT_UNSIGNED) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushi(0);
|
|
|
|
gv(RC_INT);
|
2019-12-17 01:51:28 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
gv_dup();
|
|
|
|
vpushi(31);
|
|
|
|
gen_op(TOK_SAR);
|
2019-12-17 01:51:28 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
lbuild(dbt);
|
2019-12-17 01:51:28 +08:00
|
|
|
} else if (ss == 8) {
|
|
|
|
/* from long long: just take low order word */
|
2021-10-22 13:39:54 +08:00
|
|
|
lexpand();
|
|
|
|
vpop();
|
2019-12-17 01:51:28 +08:00
|
|
|
}
|
|
|
|
ss = 4;
|
|
|
|
|
|
|
|
#elif PTR_SIZE == 8
|
|
|
|
if (ds == 8) {
|
|
|
|
/* need to convert from 32bit to 64bit */
|
|
|
|
if (sbt & VT_UNSIGNED) {
|
|
|
|
#if defined(TCC_TARGET_RISCV64)
|
|
|
|
/* RISC-V keeps 32bit vals in registers sign-extended.
|
|
|
|
So here we need a zero-extension. */
|
|
|
|
trunc = 32;
|
2018-04-01 03:52:20 +08:00
|
|
|
#else
|
2019-12-17 01:51:28 +08:00
|
|
|
goto done;
|
2018-04-01 03:52:20 +08:00
|
|
|
#endif
|
2019-12-17 01:51:28 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_cvt_sxtw();
|
2019-12-17 01:51:28 +08:00
|
|
|
goto done;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2019-12-17 01:51:28 +08:00
|
|
|
ss = ds, ds = 4, dbt = sbt;
|
|
|
|
} else if (ss == 8) {
|
2020-07-30 15:40:35 +08:00
|
|
|
/* RISC-V keeps 32bit vals in registers sign-extended.
|
|
|
|
So here we need a sign-extension for signed types and
|
|
|
|
zero-extension. for unsigned types. */
|
|
|
|
#if !defined(TCC_TARGET_RISCV64)
|
|
|
|
trunc = 32; /* zero upper 32 bits for non RISC-V targets */
|
|
|
|
#endif
|
2019-12-17 01:51:28 +08:00
|
|
|
} else {
|
|
|
|
ss = 4;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2019-12-17 01:51:28 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
if (ds >= ss)
|
|
|
|
goto done;
|
|
|
|
#if defined TCC_TARGET_I386 || defined TCC_TARGET_X86_64 || defined TCC_TARGET_ARM64
|
|
|
|
if (ss == 4) {
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_cvt_csti(dbt);
|
2019-12-17 01:51:28 +08:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
bits = (ss - ds) * 8;
|
|
|
|
/* for unsigned, gen_op will convert SAR to SHR */
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->type.t = (ss == 8 ? VT_LLONG : VT_INT) | (dbt & VT_UNSIGNED);
|
|
|
|
vpushi(bits);
|
|
|
|
gen_op(TOK_SHL);
|
|
|
|
vpushi(bits - trunc);
|
|
|
|
gen_op(TOK_SAR);
|
|
|
|
vpushi(trunc);
|
|
|
|
gen_op(TOK_SHR);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2019-12-17 01:51:28 +08:00
|
|
|
done:
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->type = *type;
|
|
|
|
vtop->type.t &= ~ ( VT_CONSTANT | VT_VOLATILE | VT_ARRAY );
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
|
2011-04-07 00:17:03 +08:00
|
|
|
/* return type size as known at compile time. Put alignment at 'a' */
|
2009-12-20 08:53:49 +08:00
|
|
|
ST_FUNC int type_size(CType *type, int *a)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
|
|
|
Sym *s;
|
|
|
|
int bt;
|
|
|
|
|
|
|
|
bt = type->t & VT_BTYPE;
|
|
|
|
if (bt == VT_STRUCT) {
|
|
|
|
/* struct/union */
|
|
|
|
s = type->ref;
|
2011-03-19 08:47:35 +08:00
|
|
|
*a = s->r;
|
2009-05-06 02:18:10 +08:00
|
|
|
return s->c;
|
|
|
|
} else if (bt == VT_PTR) {
|
2011-04-08 16:09:39 +08:00
|
|
|
if (type->t & VT_ARRAY) {
|
2009-05-06 02:18:10 +08:00
|
|
|
int ts;
|
|
|
|
|
|
|
|
s = type->ref;
|
|
|
|
ts = type_size(&s->type, a);
|
|
|
|
|
|
|
|
if (ts < 0 && s->c < 0)
|
|
|
|
ts = -ts;
|
|
|
|
|
|
|
|
return ts * s->c;
|
|
|
|
} else {
|
|
|
|
*a = PTR_SIZE;
|
|
|
|
return PTR_SIZE;
|
|
|
|
}
|
2019-04-07 09:15:05 +08:00
|
|
|
} else if (IS_ENUM(type->t) && type->ref->c < 0) {
|
2022-04-12 07:30:44 +08:00
|
|
|
*a = 0;
|
2017-07-09 18:38:25 +08:00
|
|
|
return -1; /* incomplete enum */
|
2009-05-06 02:18:10 +08:00
|
|
|
} else if (bt == VT_LDOUBLE) {
|
|
|
|
*a = LDOUBLE_ALIGN;
|
|
|
|
return LDOUBLE_SIZE;
|
|
|
|
} else if (bt == VT_DOUBLE || bt == VT_LLONG) {
|
|
|
|
#ifdef TCC_TARGET_I386
|
|
|
|
#ifdef TCC_TARGET_PE
|
|
|
|
*a = 8;
|
|
|
|
#else
|
|
|
|
*a = 4;
|
|
|
|
#endif
|
|
|
|
#elif defined(TCC_TARGET_ARM)
|
|
|
|
#ifdef TCC_ARM_EABI
|
2015-07-30 04:53:57 +08:00
|
|
|
*a = 8;
|
2009-05-06 02:18:10 +08:00
|
|
|
#else
|
|
|
|
*a = 4;
|
|
|
|
#endif
|
|
|
|
#else
|
|
|
|
*a = 8;
|
|
|
|
#endif
|
|
|
|
return 8;
|
2016-03-24 22:44:01 +08:00
|
|
|
} else if (bt == VT_INT || bt == VT_FLOAT) {
|
2009-05-06 02:18:10 +08:00
|
|
|
*a = 4;
|
|
|
|
return 4;
|
|
|
|
} else if (bt == VT_SHORT) {
|
|
|
|
*a = 2;
|
|
|
|
return 2;
|
2013-04-19 18:08:12 +08:00
|
|
|
} else if (bt == VT_QLONG || bt == VT_QFLOAT) {
|
|
|
|
*a = 8;
|
|
|
|
return 16;
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
|
|
|
/* char, void, function, _Bool */
|
|
|
|
*a = 1;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-04-07 00:17:03 +08:00
|
|
|
/* push type size as known at runtime time on top of value stack. Put
|
|
|
|
alignment at 'a' */
|
2021-12-08 17:49:28 +08:00
|
|
|
static void vpush_type_size(CType *type, int *a)
|
2011-04-07 00:17:03 +08:00
|
|
|
{
|
|
|
|
if (type->t & VT_VLA) {
|
2016-10-09 06:13:31 +08:00
|
|
|
type_size(&type->ref->type, a);
|
2021-10-22 13:39:54 +08:00
|
|
|
vset(&int_type, VT_LOCAL|VT_LVAL, type->ref->c);
|
2011-04-07 00:17:03 +08:00
|
|
|
} else {
|
2021-12-08 17:49:28 +08:00
|
|
|
int size = type_size(type, a);
|
|
|
|
if (size < 0)
|
|
|
|
tcc_error("unknown type size");
|
2022-03-17 17:06:24 +08:00
|
|
|
#if PTR_SIZE == 8
|
|
|
|
vpushll(size);
|
|
|
|
#else
|
2021-12-08 17:49:28 +08:00
|
|
|
vpushi(size);
|
2022-03-17 17:06:24 +08:00
|
|
|
#endif
|
2011-04-07 00:17:03 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* return the pointed type of t */
|
|
|
|
static inline CType *pointed_type(CType *type)
|
|
|
|
{
|
|
|
|
return &type->ref->type;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* modify type so that its it is a pointer to type. */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void mk_pointer(CType *type)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
|
|
|
Sym *s;
|
2021-10-22 13:39:54 +08:00
|
|
|
s = sym_push(SYM_FIELD, type, 0, -1);
|
2017-07-09 18:38:25 +08:00
|
|
|
type->t = VT_PTR | (type->t & VT_STORAGE);
|
2009-05-06 02:18:10 +08:00
|
|
|
type->ref = s;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* return true if type1 and type2 are exactly the same (including
   qualifiers).  Thin wrapper over compare_types() with qualifier
   checking enabled (unqualified == 0). */
static int is_compatible_types(CType *type1, CType *type2)
{
    return compare_types(type1,type2,0);
}
|
|
|
|
|
|
|
|
/* return true if type1 and type2 are the same (ignoring qualifiers).
   Thin wrapper over compare_types() with unqualified comparison
   enabled (unqualified == 1). */
static int is_compatible_unqualified_types(CType *type1, CType *type2)
{
    return compare_types(type1,type2,1);
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* report a fatal "cannot convert" diagnostic for an invalid cast from
   source type 'st' to destination type 'dt' (does not return) */
static void cast_error(CType *st, CType *dt)
{
    type_incompatibility_error(st, dt, "cannot convert '%s' to '%s'");
}
|
|
|
|
|
2019-12-17 01:44:35 +08:00
|
|
|
/* verify type compatibility to store vtop in 'dt' type.
   Emits warnings for questionable-but-accepted assignments and a fatal
   error (via cast_error) for invalid ones.  Does not modify vtop. */
static void verify_assign_cast(CType *dt)
{
    CType *st, *type1, *type2;
    int dbt, sbt, qualwarn, lvl;

    st = &vtop->type; /* source type */
    dbt = dt->t & VT_BTYPE;
    sbt = st->t & VT_BTYPE;
    if (dt->t & VT_CONSTANT)
        tcc_warning("assignment of read-only location");
    switch(dbt) {
    case VT_VOID:
        /* only 'void = void' is accepted (e.g. *p = *q with void *p, *q) */
        if (sbt != dbt)
            tcc_error("assignment to void expression");
        break;
    case VT_PTR:
        /* special cases for pointers */
        /* '0' can also be a pointer */
        if (is_null_pointer(vtop))
            break;
        /* accept implicit pointer to integer cast with warning */
        if (is_integer_btype(sbt)) {
            tcc_warning("assignment makes pointer from integer without a cast");
            break;
        }
        type1 = pointed_type(dt);
        if (sbt == VT_PTR)
            type2 = pointed_type(st);
        else if (sbt == VT_FUNC)
            type2 = st; /* a function is implicitly a function pointer */
        else
            goto error;
        if (is_compatible_types(type1, type2))
            break;
        /* walk matching pointer levels on both sides, tracking whether
           any level of the source carries qualifiers the destination
           drops; stops at the first level where either side is not a
           pointer.  'lvl' counts the levels descended. */
        for (qualwarn = lvl = 0;; ++lvl) {
            if (((type2->t & VT_CONSTANT) && !(type1->t & VT_CONSTANT)) ||
                ((type2->t & VT_VOLATILE) && !(type1->t & VT_VOLATILE)))
                qualwarn = 1;
            dbt = type1->t & (VT_BTYPE|VT_LONG);
            sbt = type2->t & (VT_BTYPE|VT_LONG);
            if (dbt != VT_PTR || sbt != VT_PTR)
                break;
            type1 = pointed_type(type1);
            type2 = pointed_type(type2);
        }
        if (!is_compatible_unqualified_types(type1, type2)) {
            if ((dbt == VT_VOID || sbt == VT_VOID) && lvl == 0) {
                /* void * can match anything */
            } else if (dbt == sbt
                && is_integer_btype(sbt & VT_BTYPE)
                && IS_ENUM(type1->t) + IS_ENUM(type2->t)
                    + !!((type1->t ^ type2->t) & VT_UNSIGNED) < 2) {
                /* Like GCC don't warn by default for merely changes
                   in pointer target signedness.  Do warn for different
                   base types, though, in particular for unsigned enums
                   and signed int targets.  */
            } else {
                tcc_warning("assignment from incompatible pointer type");
                break;
            }
        }
        if (qualwarn)
            tcc_warning_c(warn_discarded_qualifiers)("assignment discards qualifiers from pointer target type");
        break;
    case VT_BYTE:
    case VT_SHORT:
    case VT_INT:
    case VT_LLONG:
        if (sbt == VT_PTR || sbt == VT_FUNC) {
            tcc_warning("assignment makes integer from pointer without a cast");
        } else if (sbt == VT_STRUCT) {
            /* struct -> integer is only valid if the types match, which
               the shared struct check below will reject */
            goto case_VT_STRUCT;
        }
        /* XXX: more tests */
        break;
    case VT_STRUCT:
    case_VT_STRUCT:
        if (!is_compatible_unqualified_types(dt, st)) {
    error:
            cast_error(st, dt);
        }
        break;
    }
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* check that vtop may be assigned to type 'dt' (warning/error as
   needed), then convert vtop to that type */
static void gen_assign_cast(CType *dt)
{
    verify_assign_cast(dt);
    gen_cast(dt);
}
|
|
|
|
|
|
|
|
/* store vtop in lvalue pushed on stack.
   On entry: vtop[-1] is the destination lvalue, vtop is the value.
   On exit: the stored value remains on the stack as the expression
   result (except for void stores, which just drop the value). */
ST_FUNC void vstore(void)
{
    int sbt, dbt, ft, r, size, align, bit_size, bit_pos, delayed_cast;

    ft = vtop[-1].type.t;          /* full destination type flags */
    sbt = vtop->type.t & VT_BTYPE; /* source basic type */
    dbt = ft & VT_BTYPE;           /* destination basic type */
    verify_assign_cast(&vtop[-1].type);

    if (sbt == VT_STRUCT) {
        /* if structure, only generate pointer */
        /* structure assignment : generate memcpy */
        size = type_size(&vtop->type, &align);
        /* destination, keep on stack() as result */
        vpushv(vtop - 1);
#ifdef CONFIG_TCC_BCHECK
        if (vtop->r & VT_MUSTBOUND)
            gbound(); /* check would be wrong after gaddrof() */
#endif
        vtop->type.t = VT_PTR;
        gaddrof();
        /* source */
        vswap();
#ifdef CONFIG_TCC_BCHECK
        if (vtop->r & VT_MUSTBOUND)
            gbound();
#endif
        vtop->type.t = VT_PTR;
        gaddrof();

#ifdef TCC_TARGET_NATIVE_STRUCT_COPY
        /* inline copy sequence, unless bounds checking needs the
           memmove call to be visible to the checker */
        if (1
#ifdef CONFIG_TCC_BCHECK
            && !tcc_state->do_bounds_check
#endif
            ) {
            gen_struct_copy(size);
        } else
#endif
        {
            /* type size */
            vpushi(size);
            /* Use memmove, rather than memcpy, as dest and src may be same: */
#ifdef TCC_ARM_EABI
            /* pick an alignment-specialized helper when possible */
            if(!(align & 7))
                vpush_helper_func(TOK_memmove8);
            else if(!(align & 3))
                vpush_helper_func(TOK_memmove4);
            else
#endif
            vpush_helper_func(TOK_memmove);
            vrott(4);
            gfunc_call(3);
        }

    } else if (ft & VT_BITFIELD) {
        /* bitfield store handling */

        /* save lvalue as expression result (example: s.b = s.a = n;) */
        vdup(), vtop[-1] = vtop[-2];

        bit_pos = BIT_POS(ft);
        bit_size = BIT_SIZE(ft);
        /* remove bit field info to avoid loops */
        vtop[-1].type.t = ft & ~VT_STRUCT_MASK;

        if (dbt == VT_BOOL) {
            /* _Bool bitfield: normalize the value to 0/1 first, then
               treat the container as unsigned byte for the store */
            gen_cast(&vtop[-1].type);
            vtop[-1].type.t = (vtop[-1].type.t & ~VT_BTYPE) | (VT_BYTE | VT_UNSIGNED);
        }
        r = adjust_bf(vtop - 1, bit_pos, bit_size);
        if (dbt != VT_BOOL) {
            gen_cast(&vtop[-1].type);
            dbt = vtop[-1].type.t & VT_BTYPE;
        }
        if (r == VT_STRUCT) {
            /* bitfield straddles storage units: use the packed path */
            store_packed_bf(bit_pos, bit_size);
        } else {
            /* read-modify-write: mask + shift source, clear the field
               in the destination, OR them together, store, drop temp */
            unsigned long long mask = (1ULL << bit_size) - 1;
            if (dbt != VT_BOOL) {
                /* mask source */
                if (dbt == VT_LLONG)
                    vpushll(mask);
                else
                    vpushi((unsigned)mask);
                gen_op('&');
            }
            /* shift source */
            vpushi(bit_pos);
            gen_op(TOK_SHL);
            vswap();
            /* duplicate destination */
            vdup();
            vrott(3);
            /* load destination, mask and or with source */
            if (dbt == VT_LLONG)
                vpushll(~(mask << bit_pos));
            else
                vpushi(~((unsigned)mask << bit_pos));
            gen_op('&');
            gen_op('|');
            /* store result */
            vstore();
            /* ... and discard */
            vpop();
        }
    } else if (dbt == VT_VOID) {
        /* storing into void: evaluate nothing, just drop the value */
        --vtop;
    } else {
        /* optimize char/short casts */
        delayed_cast = 0;
        if ((dbt == VT_BYTE || dbt == VT_SHORT)
            && is_integer_btype(sbt)
            ) {
            /* narrowing integer store: the truncation happens naturally
               in the store, so the cast can be delayed (VT_MUSTCAST) */
            if ((vtop->r & VT_MUSTCAST)
                && btype_size(dbt) > btype_size(sbt)
                )
                force_charshort_cast();
            delayed_cast = 1;
        } else {
            gen_cast(&vtop[-1].type);
        }

#ifdef CONFIG_TCC_BCHECK
        /* bound check case */
        if (vtop[-1].r & VT_MUSTBOUND) {
            vswap();
            gbound();
            vswap();
        }
#endif
        gv(RC_TYPE(dbt)); /* generate value */

        if (delayed_cast) {
            /* mark the result so a later read re-applies the narrowing */
            vtop->r |= BFVAL(VT_MUSTCAST, (sbt == VT_LLONG) + 1);
            //tcc_warning("deley cast %x -> %x", sbt, dbt);
            vtop->type.t = ft & VT_TYPE;
        }

        /* if lvalue was saved on stack, must read it */
        if ((vtop[-1].r & VT_VALMASK) == VT_LLOCAL) {
            SValue sv;
            r = get_reg(RC_INT);
            sv.type.t = VT_PTRDIFF_T;
            sv.r = VT_LOCAL | VT_LVAL;
            sv.c.i = vtop[-1].c.i;
            load(r, &sv);
            vtop[-1].r = r | VT_LVAL;
        }

        r = vtop->r & VT_VALMASK;
        /* two word case handling :
           store second register at word + 4 (or +8 for x86-64) */
        if (USING_TWO_WORDS(dbt)) {
            int load_type = (dbt == VT_QFLOAT) ? VT_DOUBLE : VT_PTRDIFF_T;
            vtop[-1].type.t = load_type;
            store(r, vtop - 1);
            vswap();
            /* convert to int to increment easily */
            vtop->type.t = VT_PTRDIFF_T;
            gaddrof();
            vpushs(PTR_SIZE);
            gen_op('+');
            vtop->r |= VT_LVAL;
            vswap();
            vtop[-1].type.t = load_type;
            /* XXX: it works because r2 is spilled last ! */
            store(vtop->r2, vtop - 1);
        } else {
            /* single word */
            store(r, vtop - 1);
        }
        vswap();
        vtop--; /* NOT vpop() because on x86 it would flush the fp stack */
    }
}
|
|
|
|
|
|
|
|
/* post defines POST/PRE add. c is the token ++ or --.
   Implements increment/decrement of the lvalue on vtop; for the post
   form, the original value is left on the stack as the result. */
ST_FUNC void inc(int post, int c)
{
    test_lvalue();
    vdup(); /* save lvalue */
    if (post) {
        gv_dup(); /* duplicate value */
        /* rotate so the saved pre-increment value ends up below the
           lvalue/value pair used for the store */
        vrotb(3);
        vrotb(3);
    }
    /* add constant: TOK_MID sits between ++ and --, so c - TOK_MID
       yields +1 or -1 */
    vpushi(c - TOK_MID);
    gen_op('+');
    vstore(); /* store value */
    if (post)
        vpop(); /* if post op, return saved value */
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* parse a sequence of adjacent string literal tokens, concatenating
   them into 'astr' (NUL-terminated); 'msg' is used in the diagnostic
   when no string is present */
ST_FUNC void parse_mult_str (CString *astr, const char *msg)
{
    /* read the string */
    if (tok != TOK_STR)
        expect(msg);
    cstr_new(astr);
    /* XXX: add \0 handling too ? */
    for (; tok == TOK_STR; next())
        cstr_cat(astr, tokc.str.data, -1);
    cstr_ccat(astr, '\0');
}
|
|
|
|
|
2016-10-09 08:41:34 +08:00
|
|
|
/* If I is >= 1 and a power of two, returns log2(i)+1.
|
|
|
|
If I is 0 returns 0. */
|
2020-05-15 09:46:55 +08:00
|
|
|
ST_FUNC int exact_log2p1(int i)
|
2016-10-09 08:41:34 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
if (!i)
|
|
|
|
return 0;
|
|
|
|
for (ret = 1; i >= 1 << 8; ret += 8)
|
|
|
|
i >>= 8;
|
|
|
|
if (i >= 1 << 4)
|
|
|
|
ret += 4, i >>= 4;
|
|
|
|
if (i >= 1 << 2)
|
|
|
|
ret += 2, i >>= 2;
|
|
|
|
if (i >= 1 << 1)
|
|
|
|
ret++;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-07-09 18:34:11 +08:00
|
|
|
/* Parse __attribute__((...)) GNUC extension. */
/* Consumes zero or more consecutive __attribute__((...)) groups (the
   trailing 'goto redo' loops while another group follows) and records
   recognized attributes into *ad.  Unrecognized attributes produce a
   warning and their parenthesized arguments are skipped. */
static void parse_attribute(AttributeDef *ad)
{
    int t, n;
    CString astr;

redo:
    if (tok != TOK_ATTRIBUTE1 && tok != TOK_ATTRIBUTE2)
        return;
    next();
    skip('(');
    skip('(');
    while (tok != ')') {
        if (tok < TOK_IDENT)
            expect("attribute name");
        t = tok;
        next();
        switch(t) {
        case TOK_CLEANUP1:
        case TOK_CLEANUP2:
        {
            Sym *s;

            /* cleanup(func): func must be a declared function; an
               implicit declaration is created if it is unknown */
            skip('(');
            s = sym_find(tok);
            if (!s) {
                tcc_warning_c(warn_implicit_function_declaration)(
                    "implicit declaration of function '%s'", get_tok_str(tok, &tokc));
                s = external_global_sym(tok, &func_old_type);
            } else if ((s->type.t & VT_BTYPE) != VT_FUNC)
                tcc_error("'%s' is not declared as function", get_tok_str(tok, &tokc));
            ad->cleanup_func = s;
            next();
            skip(')');
            break;
        }
        case TOK_CONSTRUCTOR1:
        case TOK_CONSTRUCTOR2:
            ad->f.func_ctor = 1;
            break;
        case TOK_DESTRUCTOR1:
        case TOK_DESTRUCTOR2:
            ad->f.func_dtor = 1;
            break;
        case TOK_ALWAYS_INLINE1:
        case TOK_ALWAYS_INLINE2:
            ad->f.func_alwinl = 1;
            break;
        case TOK_SECTION1:
        case TOK_SECTION2:
            skip('(');
            parse_mult_str(&astr, "section name");
            ad->section = find_section(tcc_state, (char *)astr.data);
            skip(')');
            cstr_free(&astr);
            break;
        case TOK_ALIAS1:
        case TOK_ALIAS2:
            skip('(');
            parse_mult_str(&astr, "alias(\"target\")");
            ad->alias_target = /* save string as token, for later */
                tok_alloc((char*)astr.data, astr.size-1)->tok;
            skip(')');
            cstr_free(&astr);
            break;
        case TOK_VISIBILITY1:
        case TOK_VISIBILITY2:
            skip('(');
            parse_mult_str(&astr,
                           "visibility(\"default|hidden|internal|protected\")");
            if (!strcmp (astr.data, "default"))
                ad->a.visibility = STV_DEFAULT;
            else if (!strcmp (astr.data, "hidden"))
                ad->a.visibility = STV_HIDDEN;
            else if (!strcmp (astr.data, "internal"))
                ad->a.visibility = STV_INTERNAL;
            else if (!strcmp (astr.data, "protected"))
                ad->a.visibility = STV_PROTECTED;
            else
                expect("visibility(\"default|hidden|internal|protected\")");
            skip(')');
            cstr_free(&astr);
            break;
        case TOK_ALIGNED1:
        case TOK_ALIGNED2:
            /* aligned or aligned(n); without an argument the maximum
               useful alignment is used */
            if (tok == '(') {
                next();
                n = expr_const();
                if (n <= 0 || (n & (n - 1)) != 0)
                    tcc_error("alignment must be a positive power of two");
                skip(')');
            } else {
                n = MAX_ALIGN;
            }
            ad->a.aligned = exact_log2p1(n);
            /* the bitfield 'aligned' stores log2+1; reject values that
               do not round-trip */
            if (n != 1 << (ad->a.aligned - 1))
                tcc_error("alignment of %d is larger than implemented", n);
            break;
        case TOK_PACKED1:
        case TOK_PACKED2:
            ad->a.packed = 1;
            break;
        case TOK_WEAK1:
        case TOK_WEAK2:
            ad->a.weak = 1;
            break;
        case TOK_UNUSED1:
        case TOK_UNUSED2:
            /* currently, no need to handle it because tcc does not
               track unused objects */
            break;
        case TOK_NORETURN1:
        case TOK_NORETURN2:
            ad->f.func_noreturn = 1;
            break;
        case TOK_CDECL1:
        case TOK_CDECL2:
        case TOK_CDECL3:
            ad->f.func_call = FUNC_CDECL;
            break;
        case TOK_STDCALL1:
        case TOK_STDCALL2:
        case TOK_STDCALL3:
            ad->f.func_call = FUNC_STDCALL;
            break;
#ifdef TCC_TARGET_I386
        case TOK_REGPARM1:
        case TOK_REGPARM2:
            /* regparm(n): clamp n to 0..3 register parameters */
            skip('(');
            n = expr_const();
            if (n > 3)
                n = 3;
            else if (n < 0)
                n = 0;
            if (n > 0)
                ad->f.func_call = FUNC_FASTCALL1 + n - 1;
            skip(')');
            break;
        case TOK_FASTCALL1:
        case TOK_FASTCALL2:
        case TOK_FASTCALL3:
            ad->f.func_call = FUNC_FASTCALLW;
            break;
#endif
        case TOK_MODE:
            /* mode(QI/HI/SI/DI/word): stored as base type + 1 so that
               0 can mean "no mode attribute" */
            skip('(');
            switch(tok) {
            case TOK_MODE_DI:
                ad->attr_mode = VT_LLONG + 1;
                break;
            case TOK_MODE_QI:
                ad->attr_mode = VT_BYTE + 1;
                break;
            case TOK_MODE_HI:
                ad->attr_mode = VT_SHORT + 1;
                break;
            case TOK_MODE_SI:
            case TOK_MODE_word:
                ad->attr_mode = VT_INT + 1;
                break;
            default:
                /* NOTE(review): trailing \n in the message looks
                   unintended if tcc_warning appends one — confirm */
                tcc_warning("__mode__(%s) not supported\n", get_tok_str(tok, NULL));
                break;
            }
            next();
            skip(')');
            break;
        case TOK_DLLEXPORT:
            ad->a.dllexport = 1;
            break;
        case TOK_NODECORATE:
            ad->a.nodecorate = 1;
            break;
        case TOK_DLLIMPORT:
            ad->a.dllimport = 1;
            break;
        default:
            tcc_warning_c(warn_unsupported)("'%s' attribute ignored", get_tok_str(t, NULL));
            /* skip parameters */
            if (tok == '(') {
                int parenthesis = 0;
                do {
                    if (tok == '(')
                        parenthesis++;
                    else if (tok == ')')
                        parenthesis--;
                    next();
                } while (parenthesis && tok != -1);
            }
            break;
        }
        if (tok != ',')
            break;
        next();
    }
    skip(')');
    skip(')');
    goto redo;
}
|
|
|
|
|
2019-04-11 06:30:41 +08:00
|
|
|
/* Find member 'v' in struct/union 'type', descending into anonymous
   sub-structs/unions.  On success the member's byte offset relative to
   the start of 'type' is accumulated into *cumofs and its Sym returned.
   Recursive (inner) calls pass v with SYM_FIELD set; only the top-level
   call (SYM_FIELD clear) raises an error when the member is missing or
   the type is incomplete — inner calls simply return NULL. */
static Sym * find_field (CType *type, int v, int *cumofs)
{
    Sym *s = type->ref;
    int v1 = v | SYM_FIELD;

    while ((s = s->next) != NULL) {
        if (s->v == v1) {
            *cumofs += s->c;   /* s->c is the member's offset in this struct */
            return s;
        }
        if ((s->type.t & VT_BTYPE) == VT_STRUCT
            && s->v >= (SYM_FIRST_ANOM | SYM_FIELD)) {
            /* try to find field in anonymous sub-struct/union */
            Sym *ret = find_field (&s->type, v1, cumofs);
            if (ret) {
                *cumofs += s->c;   /* add the anonymous member's own offset */
                return ret;
            }
        }
    }

    if (!(v & SYM_FIELD)) { /* top-level call */
        s = type->ref;
        if (s->c < 0)
            tcc_error("dereferencing incomplete type '%s'",
                get_tok_str(s->v & ~SYM_STRUCT, 0));
        else
            tcc_error("field not found: %s",
                get_tok_str(v, &tokc));
    }
    return NULL;
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Detect duplicate member names in struct/union 'type' (including names
   pulled in through anonymous sub-structs/unions).  Designed to be run
   twice: with check=1 each named member toggles the SYM_FIELD bit on in
   its TokenSym and a bit already set means a duplicate; the following
   check=0 pass toggles all the bits back off. */
static void check_fields (CType *type, int check)
{
    Sym *s = type->ref;

    while ((s = s->next) != NULL) {
        int v = s->v & ~SYM_FIELD;
        if (v < SYM_FIRST_ANOM) {   /* a named (non-anonymous) member */
            TokenSym *ts = table_ident[v - TOK_IDENT];
            if (check && (ts->tok & SYM_FIELD))
                tcc_error("duplicate member '%s'", get_tok_str(v, NULL));
            ts->tok ^= SYM_FIELD;   /* mark on pass 1, clear on pass 2 */
        } else if ((s->type.t & VT_BTYPE) == VT_STRUCT)
            check_fields (&s->type, check);
    }
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Lay out the members of a struct/union 'type': assign each member its
   byte offset (f->c), and store the record's total size in type->ref->c
   and its alignment in type->ref->r.  Honors #pragma pack and the
   packed/aligned attributes, and implements both PCC/GCC and MS
   bit-field layout modes (pcc = !ms_bitfields).  A final pass checks
   whether each bit-field can be accessed through a (possibly narrower
   or wider) integer type, recording the substitute in f->auxtype. */
static void struct_layout(CType *type, AttributeDef *ad)
{
    int size, align, maxalign, offset, c, bit_pos, bit_size;
    int packed, a, bt, prevbt, prev_bit_size;
    int pcc = !tcc_state->ms_bitfields;
    int pragma_pack = *tcc_state->pack_stack_ptr;
    Sym *f;

    maxalign = 1;
    offset = 0;
    c = 0;          /* running byte offset / final size */
    bit_pos = 0;    /* bit position within the current byte(s) */
    prevbt = VT_STRUCT; /* make it never match */
    prev_bit_size = 0;

//#define BF_DEBUG

    for (f = type->ref->next; f; f = f->next) {
        if (f->type.t & VT_BITFIELD)
            bit_size = BIT_SIZE(f->type.t);
        else
            bit_size = -1;  /* not a bit-field */
        size = type_size(&f->type, &align);
        a = f->a.aligned ? 1 << (f->a.aligned - 1) : 0;
        packed = 0;

        if (pcc && bit_size == 0) {
            /* in pcc mode, packing does not affect zero-width bitfields */

        } else {
            /* in pcc mode, attribute packed overrides if set. */
            if (pcc && (f->a.packed || ad->a.packed))
                align = packed = 1;

            /* pragma pack overrides align if lesser and packs bitfields always */
            if (pragma_pack) {
                packed = 1;
                if (pragma_pack < align)
                    align = pragma_pack;
                /* in pcc mode pragma pack also overrides individual align */
                if (pcc && pragma_pack < a)
                    a = 0;
            }
        }
        /* some individual align was specified */
        if (a)
            align = a;

        if (type->ref->type.t == VT_UNION) {
            /* union: every member starts at offset 0; size is the max */
            if (pcc && bit_size >= 0)
                size = (bit_size + 7) >> 3;
            offset = 0;
            if (size > c)
                c = size;

        } else if (bit_size < 0) {
            /* plain (non-bit-field) struct member */
            if (pcc)
                c += (bit_pos + 7) >> 3;   /* flush pending bit-field bits */
            c = (c + align - 1) & -align;
            offset = c;
            if (size > 0)
                c += size;
            bit_pos = 0;
            prevbt = VT_STRUCT;
            prev_bit_size = 0;

        } else {
            /* A bit-field. Layout is more complicated. There are two
               options: PCC (GCC) compatible and MS compatible */
            if (pcc) {
                /* In PCC layout a bit-field is placed adjacent to the
                   preceding bit-fields, except if:
                   - it has zero-width
                   - an individual alignment was given
                   - it would overflow its base type container and
                     there is no packing */
                if (bit_size == 0) {
            new_field:
                    c = (c + ((bit_pos + 7) >> 3) + align - 1) & -align;
                    bit_pos = 0;
                } else if (f->a.aligned) {
                    goto new_field;
                } else if (!packed) {
                    int a8 = align * 8;
                    int ofs = ((c * 8 + bit_pos) % a8 + bit_size + a8 - 1) / a8;
                    if (ofs > size / align)
                        goto new_field;
                }

                /* in pcc mode, long long bitfields have type int if they fit */
                if (size == 8 && bit_size <= 32)
                    f->type.t = (f->type.t & ~VT_BTYPE) | VT_INT, size = 4;

                while (bit_pos >= align * 8)
                    c += align, bit_pos -= align * 8;
                offset = c;

                /* In PCC layout named bit-fields influence the alignment
                   of the containing struct using the base types alignment,
                   except for packed fields (which here have correct align). */
                if (f->v & SYM_FIRST_ANOM
                    // && bit_size // ??? gcc on ARM/rpi does that
                    )
                    align = 1;

            } else {
                /* MS bit-field mode */
                bt = f->type.t & VT_BTYPE;
                if ((bit_pos + bit_size > size * 8)
                    || (bit_size > 0) == (bt != prevbt)
                    ) {
                    c = (c + align - 1) & -align;
                    offset = c;
                    bit_pos = 0;
                    /* In MS bitfield mode a bit-field run always uses
                       at least as many bits as the underlying type.
                       To start a new run it's also required that this
                       or the last bit-field had non-zero width. */
                    if (bit_size || prev_bit_size)
                        c += size;
                }
                /* In MS layout the records alignment is normally
                   influenced by the field, except for a zero-width
                   field at the start of a run (but by further zero-width
                   fields it is again). */
                if (bit_size == 0 && prevbt != bt)
                    align = 1;
                prevbt = bt;
                prev_bit_size = bit_size;
            }

            /* store the bit position inside the field's type word */
            f->type.t = (f->type.t & ~(0x3f << VT_STRUCT_SHIFT))
                        | (bit_pos << VT_STRUCT_SHIFT);
            bit_pos += bit_size;
        }
        if (align > maxalign)
            maxalign = align;

#ifdef BF_DEBUG
        printf("set field %s offset %-2d size %-2d align %-2d",
               get_tok_str(f->v & ~SYM_FIELD, NULL), offset, size, align);
        if (f->type.t & VT_BITFIELD) {
            printf(" pos %-2d bits %-2d",
                    BIT_POS(f->type.t),
                    BIT_SIZE(f->type.t)
                    );
        }
        printf("\n");
#endif

        f->c = offset;
        f->r = 0;
    }

    if (pcc)
        c += (bit_pos + 7) >> 3;   /* account for trailing bit-field bits */

    /* store size and alignment */
    a = bt = ad->a.aligned ? 1 << (ad->a.aligned - 1) : 1;
    if (a < maxalign)
        a = maxalign;
    type->ref->r = a;
    if (pragma_pack && pragma_pack < maxalign && 0 == pcc) {
        /* can happen if individual align for some member was given.  In
           this case MSVC ignores maxalign when aligning the size */
        a = pragma_pack;
        if (a < bt)
            a = bt;
    }
    c = (c + a - 1) & -a;   /* round total size up to the final alignment */
    type->ref->c = c;

#ifdef BF_DEBUG
    printf("struct size %-2d align %-2d\n\n", c, a), fflush(stdout);
#endif

    /* check whether we can access bitfields by their type */
    for (f = type->ref->next; f; f = f->next) {
        int s, px, cx, c0;
        CType t;

        if (0 == (f->type.t & VT_BITFIELD))
            continue;
        f->type.ref = f;
        f->auxtype = -1;
        bit_size = BIT_SIZE(f->type.t);
        if (bit_size == 0)
            continue;
        bit_pos = BIT_POS(f->type.t);
        size = type_size(&f->type, &align);

        /* accessible through its own type and fully inside the record? */
        if (bit_pos + bit_size <= size * 8 && f->c + size <= c
#ifdef TCC_TARGET_ARM
            && !(f->c & (align - 1))
#endif
            )
            continue;

        /* try to access the field using a different type */
        c0 = -1, s = align = 1;
        t.t = VT_BYTE;
        for (;;) {
            px = f->c * 8 + bit_pos;
            cx = (px >> 3) & -align;
            px = px - (cx << 3);
            if (c0 == cx)
                break;   /* offset stabilized for the chosen type */
            s = (px + bit_size + 7) >> 3;
            if (s > 4) {
                t.t = VT_LLONG;
            } else if (s > 2) {
                t.t = VT_INT;
            } else if (s > 1) {
                t.t = VT_SHORT;
            } else {
                t.t = VT_BYTE;
            }
            s = type_size(&t, &align);
            c0 = cx;
        }

        if (px + bit_size <= s * 8 && cx + s <= c
#ifdef TCC_TARGET_ARM
            && !(cx & (align - 1))
#endif
            ) {
            /* update offset and bit position */
            f->c = cx;
            bit_pos = px;
            f->type.t = (f->type.t & ~(0x3f << VT_STRUCT_SHIFT))
                        | (bit_pos << VT_STRUCT_SHIFT);
            if (s != size)
                f->auxtype = t.t;
#ifdef BF_DEBUG
            printf("FIX field %s offset %-2d size %-2d align %-2d "
                   "pos %-2d bits %-2d\n",
                   get_tok_str(f->v & ~SYM_FIELD, NULL),
                   cx, s, align, px, bit_size);
#endif
        } else {
            /* fall back to load/store single-byte wise */
            f->auxtype = VT_STRUCT;
#ifdef BF_DEBUG
            printf("FIX field %s : load byte-wise\n",
                   get_tok_str(f->v & ~SYM_FIELD, NULL));
#endif
        }
    }
}
|
|
|
|
|
2022-12-10 08:12:44 +08:00
|
|
|
static void do_Static_assert(void);
|
|
|
|
|
2017-07-09 18:38:25 +08:00
|
|
|
/* enum/struct/union declaration. u is VT_ENUM/VT_STRUCT/VT_UNION */
/* Parses an optional tag, an optional member/enumerator list between
   braces, and trailing attributes; fills *type with the resulting
   (possibly still incomplete, c == -1) type.  For enums the smallest
   fitting integral base type is selected; for structs/unions member
   layout is delegated to struct_layout(). */
static void struct_decl(CType *type, int u)
{
    int v, c, size, align, flexible;
    int bit_size, bsize, bt;
    Sym *s, *ss, **ps;
    AttributeDef ad, ad1;
    CType type1, btype;

    memset(&ad, 0, sizeof ad);
    next();
    parse_attribute(&ad);
    if (tok != '{') {
        v = tok;
        next();
        /* struct already defined ? return it */
        if (v < TOK_IDENT)
            expect("struct/union/enum name");
        s = struct_find(v);
        if (s && (s->sym_scope == local_scope || tok != '{')) {
            if (u == s->type.t)
                goto do_decl;
            if (u == VT_ENUM && IS_ENUM(s->type.t))
                goto do_decl;
            tcc_error("redefinition of '%s'", get_tok_str(v, NULL));
        }
    } else {
        v = anon_sym++;   /* tagless: synthesize an anonymous name */
    }
    /* Record the original enum/struct/union token.  */
    type1.t = u == VT_ENUM ? u | VT_INT | VT_UNSIGNED : u;
    type1.ref = NULL;
    /* we put an undefined size for struct/union */
    s = sym_push(v | SYM_STRUCT, &type1, 0, -1);
    s->r = 0; /* default alignment is zero as gcc */
do_decl:
    type->t = s->type.t;
    type->ref = s;

    if (tok == '{') {
        next();
        if (s->c != -1)
            tcc_error("struct/union/enum already defined");
        s->c = -2;   /* mark as being defined */
        /* cannot be empty */
        /* non empty enums are not allowed */
        ps = &s->next;
        if (u == VT_ENUM) {
            /* ll: current enumerator value; pl/nl: largest positive /
               smallest negative value seen, for base-type selection */
            long long ll = 0, pl = 0, nl = 0;
            CType t;
            t.ref = s;
            /* enum symbols have static storage */
            t.t = VT_INT|VT_STATIC|VT_ENUM_VAL;
            for(;;) {
                v = tok;
                if (v < TOK_UIDENT)
                    expect("identifier");
                ss = sym_find(v);
                if (ss && !local_stack)
                    tcc_error("redefinition of enumerator '%s'",
                              get_tok_str(v, NULL));
                next();
                if (tok == '=') {
                    next();
                    ll = expr_const64();
                }
                ss = sym_push(v, &t, VT_CONST, 0);
                ss->enum_val = ll;
                *ps = ss, ps = &ss->next;
                if (ll < nl)
                    nl = ll;
                if (ll > pl)
                    pl = ll;
                if (tok != ',')
                    break;
                next();
                ll++;
                /* NOTE: we accept a trailing comma */
                if (tok == '}')
                    break;
            }
            skip('}');
            /* set integral type of the enum */
            t.t = VT_INT;
            if (nl >= 0) {   /* no negative values: unsigned base type */
                if (pl != (unsigned)pl)
                    t.t = (LONG_SIZE==8 ? VT_LLONG|VT_LONG : VT_LLONG);
                t.t |= VT_UNSIGNED;
            } else if (pl != (int)pl || nl != (int)nl)
                t.t = (LONG_SIZE==8 ? VT_LLONG|VT_LONG : VT_LLONG);
            s->type.t = type->t = t.t | VT_ENUM;
            s->c = 0;   /* now complete */
            /* set type for enum members */
            for (ss = s->next; ss; ss = ss->next) {
                ll = ss->enum_val;
                if (ll == (int)ll) /* default is int if it fits */
                    continue;
                if (t.t & VT_UNSIGNED) {
                    ss->type.t |= VT_UNSIGNED;
                    if (ll == (unsigned)ll)
                        continue;
                }
                ss->type.t = (ss->type.t & ~VT_BTYPE)
                    | (LONG_SIZE==8 ? VT_LLONG|VT_LONG : VT_LLONG);
            }
        } else {
            /* struct/union member list */
            c = 0;          /* set to 1 once a real member was seen */
            flexible = 0;   /* set when a flexible array member was parsed */
            while (tok != '}') {
                if (tok == TOK_STATIC_ASSERT) {
                    do_Static_assert();
                    continue;
                }
                if (!parse_btype(&btype, &ad1, 0)) {
                    skip(';');
                    continue;
                }
                while (1) {
                    if (flexible)
                        tcc_error("flexible array member '%s' not at the end of struct",
                                  get_tok_str(v, NULL));
                    bit_size = -1;
                    v = 0;
                    type1 = btype;
                    if (tok != ':') {
                        if (tok != ';')
                            type_decl(&type1, &ad1, &v, TYPE_DIRECT);
                        if (v == 0) {
                            if ((type1.t & VT_BTYPE) != VT_STRUCT)
                                expect("identifier");
                            else {
                                /* anonymous struct/union member; a named
                                   tag is only allowed with -fms-extensions */
                                int v = btype.ref->v;
                                if (!(v & SYM_FIELD) && (v & ~SYM_STRUCT) < SYM_FIRST_ANOM) {
                                    if (tcc_state->ms_extensions == 0)
                                        expect("identifier");
                                }
                            }
                        }
                        if (type_size(&type1, &align) < 0) {
                            if ((u == VT_STRUCT) && (type1.t & VT_ARRAY) && c)
                                flexible = 1;
                            else
                                tcc_error("field '%s' has incomplete type",
                                          get_tok_str(v, NULL));
                        }
                        if ((type1.t & VT_BTYPE) == VT_FUNC ||
                            (type1.t & VT_BTYPE) == VT_VOID ||
                            (type1.t & VT_STORAGE))
                            tcc_error("invalid type for '%s'",
                                      get_tok_str(v, NULL));
                    }
                    if (tok == ':') {
                        next();
                        bit_size = expr_const();
                        /* XXX: handle v = 0 case for messages */
                        if (bit_size < 0)
                            tcc_error("negative width in bit-field '%s'",
                                      get_tok_str(v, NULL));
                        if (v && bit_size == 0)
                            tcc_error("zero width for bit-field '%s'",
                                      get_tok_str(v, NULL));
                        parse_attribute(&ad1);
                    }
                    size = type_size(&type1, &align);
                    if (bit_size >= 0) {
                        bt = type1.t & VT_BTYPE;
                        if (bt != VT_INT &&
                            bt != VT_BYTE &&
                            bt != VT_SHORT &&
                            bt != VT_BOOL &&
                            bt != VT_LLONG)
                            tcc_error("bitfields must have scalar type");
                        bsize = size * 8;
                        if (bit_size > bsize) {
                            tcc_error("width of '%s' exceeds its type",
                                      get_tok_str(v, NULL));
                        } else if (bit_size == bsize
                                    && !ad.a.packed && !ad1.a.packed) {
                            /* no need for bit fields */
                            ;
                        } else if (bit_size == 64) {
                            tcc_error("field width 64 not implemented");
                        } else {
                            type1.t = (type1.t & ~VT_STRUCT_MASK)
                                | VT_BITFIELD
                                | (bit_size << (VT_STRUCT_SHIFT + 6));
                        }
                    }
                    if (v != 0 || (type1.t & VT_BTYPE) == VT_STRUCT) {
                        /* Remember we've seen a real field to check
                           for placement of flexible array member. */
                        c = 1;
                    }
                    /* If member is a struct or bit-field, enforce
                       placing into the struct (as anonymous).  */
                    if (v == 0 &&
                        ((type1.t & VT_BTYPE) == VT_STRUCT ||
                         bit_size >= 0)) {
                        v = anon_sym++;
                    }
                    if (v) {
                        ss = sym_push(v | SYM_FIELD, &type1, 0, 0);
                        ss->a = ad1.a;
                        *ps = ss;
                        ps = &ss->next;
                    }
                    if (tok == ';' || tok == TOK_EOF)
                        break;
                    skip(',');
                }
                skip(';');
            }
            skip('}');
            parse_attribute(&ad);
            if (ad.cleanup_func) {
                tcc_warning("attribute '__cleanup__' ignored on type");
            }
            /* pass 1 marks names and reports duplicates, pass 2 unmarks */
            check_fields(type, 1);
            check_fields(type, 0);
            struct_layout(type, &ad);
            if (debug_modes)
                tcc_debug_fix_anon(tcc_state, type);
        }
    }
}
|
|
|
|
|
2017-07-14 23:42:48 +08:00
|
|
|
/* Fold the attributes attached to symbol 's' into the attribute
   definition 'ad' (used e.g. to inherit attributes from a typedef). */
static void sym_to_attr(AttributeDef *ad, Sym *s)
{
    /* symbol attributes and function attributes live in separate
       sub-structures; each merge is independent of the other */
    merge_funcattr(&ad->f, &s->f);
    merge_symattr(&ad->a, &s->a);
}
|
|
|
|
|
2016-01-11 15:51:58 +08:00
|
|
|
/* Add type qualifiers to a type. If the type is an array then the qualifiers
|
|
|
|
are added to the element type, copied because it could be a typedef. */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Apply 'qualifiers' (VT_CONSTANT/VT_VOLATILE/VT_ATOMIC bits) to a type.
   For an array, the qualifiers belong on the element type; each array
   level is copied first (sym_push of a fresh SYM_FIELD) because the ref
   chain could come from a shared typedef and must not be mutated. */
static void parse_btype_qualify(CType *type, int qualifiers)
{
    for (; type->t & VT_ARRAY; type = &type->ref->type)
        type->ref = sym_push(SYM_FIELD, &type->ref->type, 0, type->ref->c);
    type->t |= qualifiers;
}
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* return 0 if no type declaration. otherwise, return the basic type
|
2015-07-30 04:53:57 +08:00
|
|
|
and skip it.
|
2009-05-06 02:18:10 +08:00
|
|
|
*/
|
2022-07-04 21:24:46 +08:00
|
|
|
static int parse_btype(CType *type, AttributeDef *ad, int ignore_label)
{
    int t, u, bt, st, type_found, typespec_found, g, n;
    Sym *s;
    CType type1;

    memset(ad, 0, sizeof(AttributeDef));
    type_found = 0;       /* set once any specifier token was consumed */
    typespec_found = 0;   /* set once a real type specifier was seen */
    t = VT_INT;           /* accumulated VT_xxx bits; bare default is int */
    /* bt = base type seen so far, st = size modifier (short/long) seen so
       far; -1 = none yet, -2 = fixed by a typedef (forbids combining) */
    bt = st = -1;
    type->ref = NULL;

    while(1) {
        switch(tok) {
        case TOK_EXTENSION:
            /* currently, we really ignore extension */
            next();
            continue;

        /* basic types */
        case TOK_CHAR:
            u = VT_BYTE;
        basic_type:
            next();
        basic_type1:
            /* combine the new specifier 'u' with what was seen before,
               rejecting invalid combinations ("short short", etc.) */
            if (u == VT_SHORT || u == VT_LONG) {
                if (st != -1 || (bt != -1 && bt != VT_INT))
                    tmbt: tcc_error("too many basic types");
                st = u;
            } else {
                if (bt != -1 || (st != -1 && u != VT_INT))
                    goto tmbt;
                bt = u;
            }
            if (u != VT_INT)
                t = (t & ~(VT_BTYPE|VT_LONG)) | u;
            typespec_found = 1;
            break;
        case TOK_VOID:
            u = VT_VOID;
            goto basic_type;
        case TOK_SHORT:
            u = VT_SHORT;
            goto basic_type;
        case TOK_INT:
            u = VT_INT;
            goto basic_type;
        case TOK_ALIGNAS:
            /* C11 _Alignas: either _Alignas(type) or _Alignas(constant) */
            { int n;
              AttributeDef ad1;
              next();
              skip('(');
              memset(&ad1, 0, sizeof(AttributeDef));
              if (parse_btype(&type1, &ad1, 0)) {
                  type_decl(&type1, &ad1, &n, TYPE_ABSTRACT);
                  if (ad1.a.aligned)
                      n = 1 << (ad1.a.aligned - 1);
                  else
                      type_size(&type1, &n);
              } else {
                  n = expr_const();
                  if (n < 0 || (n & (n - 1)) != 0)
                      tcc_error("alignment must be a positive power of two");
              }
              skip(')');
              /* alignment is stored as log2(n)+1 (0 meaning "none") */
              ad->a.aligned = exact_log2p1(n);
            }
            continue;
        case TOK_LONG:
            if ((t & VT_BTYPE) == VT_DOUBLE) {
                /* "double long" -> long double */
                t = (t & ~(VT_BTYPE|VT_LONG)) | VT_LDOUBLE;
            } else if ((t & (VT_BTYPE|VT_LONG)) == VT_LONG) {
                /* second "long" -> long long */
                t = (t & ~(VT_BTYPE|VT_LONG)) | VT_LLONG;
            } else {
                u = VT_LONG;
                goto basic_type;
            }
            next();
            break;
#ifdef TCC_TARGET_ARM64
        case TOK_UINT128:
            /* GCC's __uint128_t appears in some Linux header files. Make it a
               synonym for long double to get the size and alignment right. */
            u = VT_LDOUBLE;
            goto basic_type;
#endif
        case TOK_BOOL:
            u = VT_BOOL;
            goto basic_type;
        case TOK_COMPLEX:
            tcc_error("_Complex is not yet supported");
            /* tcc_error does not return */
        case TOK_FLOAT:
            u = VT_FLOAT;
            goto basic_type;
        case TOK_DOUBLE:
            if ((t & (VT_BTYPE|VT_LONG)) == VT_LONG) {
                /* "long double" */
                t = (t & ~(VT_BTYPE|VT_LONG)) | VT_LDOUBLE;
            } else {
                u = VT_DOUBLE;
                goto basic_type;
            }
            next();
            break;
        case TOK_ENUM:
            struct_decl(&type1, VT_ENUM);
        basic_type2:
            /* shared tail for enum/struct/union/typeof: take over the
               parsed type and its ref, then combine like a specifier */
            u = type1.t;
            type->ref = type1.ref;
            goto basic_type1;
        case TOK_STRUCT:
            struct_decl(&type1, VT_STRUCT);
            goto basic_type2;
        case TOK_UNION:
            struct_decl(&type1, VT_UNION);
            goto basic_type2;

        /* type modifiers */
        case TOK__Atomic:
            next();
            type->t = t;
            parse_btype_qualify(type, VT_ATOMIC);
            t = type->t;
            if (tok == '(') {
                /* _Atomic(type-name) specifier form */
                parse_expr_type(&type1);
                /* remove all storage modifiers except typedef */
                type1.t &= ~(VT_STORAGE&~VT_TYPEDEF);
                if (type1.ref)
                    sym_to_attr(ad, type1.ref);
                goto basic_type2;
            }
            break;
        case TOK_CONST1:
        case TOK_CONST2:
        case TOK_CONST3:
            type->t = t;
            parse_btype_qualify(type, VT_CONSTANT);
            t = type->t;
            next();
            break;
        case TOK_VOLATILE1:
        case TOK_VOLATILE2:
        case TOK_VOLATILE3:
            type->t = t;
            parse_btype_qualify(type, VT_VOLATILE);
            t = type->t;
            next();
            break;
        case TOK_SIGNED1:
        case TOK_SIGNED2:
        case TOK_SIGNED3:
            if ((t & (VT_DEFSIGN|VT_UNSIGNED)) == (VT_DEFSIGN|VT_UNSIGNED))
                tcc_error("signed and unsigned modifier");
            /* VT_DEFSIGN records that signedness was given explicitly */
            t |= VT_DEFSIGN;
            next();
            typespec_found = 1;
            break;
        case TOK_REGISTER:
        case TOK_AUTO:
        case TOK_RESTRICT1:
        case TOK_RESTRICT2:
        case TOK_RESTRICT3:
            /* register/auto/restrict are accepted but ignored */
            next();
            break;
        case TOK_UNSIGNED:
            if ((t & (VT_DEFSIGN|VT_UNSIGNED)) == VT_DEFSIGN)
                tcc_error("signed and unsigned modifier");
            t |= VT_DEFSIGN | VT_UNSIGNED;
            next();
            typespec_found = 1;
            break;

        /* storage */
        case TOK_EXTERN:
            g = VT_EXTERN;
            goto storage;
        case TOK_STATIC:
            g = VT_STATIC;
            goto storage;
        case TOK_TYPEDEF:
            g = VT_TYPEDEF;
            goto storage;
        storage:
            /* at most one of extern/static/typedef may appear */
            if (t & (VT_EXTERN|VT_STATIC|VT_TYPEDEF) & ~g)
                tcc_error("multiple storage classes");
            t |= g;
            next();
            break;
        case TOK_INLINE1:
        case TOK_INLINE2:
        case TOK_INLINE3:
            t |= VT_INLINE;
            next();
            break;
        case TOK_NORETURN3:
            next();
            ad->f.func_noreturn = 1;
            break;
        /* GNUC attribute */
        case TOK_ATTRIBUTE1:
        case TOK_ATTRIBUTE2:
            parse_attribute(ad);
            if (ad->attr_mode) {
                /* __attribute__((mode(...))) overrides the basic type;
                   attr_mode stores the VT_xxx code + 1 */
                u = ad->attr_mode -1;
                t = (t & ~(VT_BTYPE|VT_LONG)) | u;
            }
            continue;
        /* GNUC typeof */
        case TOK_TYPEOF1:
        case TOK_TYPEOF2:
        case TOK_TYPEOF3:
            next();
            parse_expr_type(&type1);
            /* remove all storage modifiers except typedef */
            type1.t &= ~(VT_STORAGE&~VT_TYPEDEF);
            if (type1.ref)
                sym_to_attr(ad, type1.ref);
            goto basic_type2;
        case TOK_THREAD_LOCAL:
            tcc_error("_Thread_local is not implemented");
        default:
            if (typespec_found)
                goto the_end;
            /* possibly a typedef name */
            s = sym_find(tok);
            if (!s || !(s->type.t & VT_TYPEDEF))
                goto the_end;

            n = tok, next();
            if (tok == ':' && ignore_label) {
                /* ignore if it's a label */
                unget_tok(n);
                goto the_end;
            }

            t &= ~(VT_BTYPE|VT_LONG);
            /* u = everything except const/volatile, t = only const/volatile;
               the qualifiers must be re-applied via parse_btype_qualify so
               that they reach the array element type if need be */
            u = t & ~(VT_CONSTANT | VT_VOLATILE), t ^= u;
            type->t = (s->type.t & ~VT_TYPEDEF) | u;
            type->ref = s->type.ref;
            if (t)
                parse_btype_qualify(type, t);
            t = type->t;
            /* get attributes from typedef */
            sym_to_attr(ad, s);
            typespec_found = 1;
            /* forbid combining further basic types with the typedef */
            st = bt = -2;
            break;
        }
        type_found = 1;
    }
the_end:
    if (tcc_state->char_is_unsigned) {
        /* plain 'char' follows the target's default signedness */
        if ((t & (VT_DEFSIGN|VT_BTYPE)) == VT_BYTE)
            t |= VT_UNSIGNED;
    }
    /* VT_LONG is used just as a modifier for VT_INT / VT_LLONG */
    bt = t & (VT_BTYPE|VT_LONG);
    if (bt == VT_LONG)
        t |= LONG_SIZE == 8 ? VT_LLONG : VT_INT;
#ifdef TCC_USING_DOUBLE_FOR_LDOUBLE
    if (bt == VT_LDOUBLE)
        t = (t & ~(VT_BTYPE|VT_LONG)) | (VT_DOUBLE|VT_LONG);
#endif
    type->t = t;
    return type_found;
}
|
|
|
|
|
|
|
|
/* convert a function parameter type (array to pointer and function to
|
|
|
|
function pointer) */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Adjust a declared function parameter type to its effective type:
   qualifiers are dropped, arrays decay to pointers and function types
   decay to function pointers (C11 6.7.6.3p7/p8). */
static inline void convert_parameter_type(CType *pt)
{
    /* remove const and volatile qualifiers (XXX: const could be used
       to indicate a const function parameter) and the array flag in a
       single mask operation; clearing VT_ARRAY implements the ANSI C
       array-to-pointer adjustment */
    pt->t &= ~(VT_CONSTANT | VT_VOLATILE | VT_ARRAY);
    if ((pt->t & VT_BTYPE) == VT_FUNC)
        mk_pointer(pt);
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Parse '(' followed by one or more concatenated string literals into
   'astr'.  The closing ')' is left for the caller to consume. */
ST_FUNC void parse_asm_str(CString *astr)
{
    skip('(');
    /* read a possibly concatenated sequence of string constants */
    parse_mult_str(astr, "string constant");
}
|
|
|
|
|
2015-11-20 18:22:56 +08:00
|
|
|
/* Parse an asm label and return the token */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Parse an asm("name") alias after a declarator and return the token
   allocated for the assembler-level name. */
static int asm_label_instr(void)
{
    int v;
    CString astr;

    next();
    parse_asm_str(&astr);
    skip(')');
#ifdef ASM_DEBUG
    printf("asm_alias: \"%s\"\n", (char *)astr.data);
#endif
    /* intern the string (minus its NUL terminator) as a token */
    v = tok_alloc(astr.data, astr.size - 1)->tok;
    cstr_free(&astr);
    return v;
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Parse the "post" part of a declarator: a function parameter list
   "(...)" or an array declarator "[...]" (possibly a VLA).  'storage'
   carries the storage class of the declared object, 'td' is a TYPE_*
   bitmask.  Returns 0 only when a '(' turned out to introduce a nested
   declarator rather than a parameter list; 1 otherwise. */
static int post_type(CType *type, AttributeDef *ad, int storage, int td)
{
    int n, l, t1, arg_size, align;
    Sym **plast, *s, *first;
    AttributeDef ad1;
    CType pt;
    TokenString *vla_array_tok = NULL;
    int *vla_array_str = NULL;

    if (tok == '(') {
        /* function type, or recursive declarator (return if so) */
        next();
        if (TYPE_DIRECT == (td & (TYPE_DIRECT|TYPE_ABSTRACT)))
            return 0;
        if (tok == ')')
            l = 0;
        else if (parse_btype(&pt, &ad1, 0))
            l = FUNC_NEW;       /* prototype with declared parameter types */
        else if (td & (TYPE_DIRECT|TYPE_ABSTRACT)) {
            /* not a type: it's a nested declarator like "(*fp)" */
            merge_attr (ad, &ad1);
            return 0;
        } else
            l = FUNC_OLD;       /* K&R-style identifier list */

        first = NULL;
        plast = &first;
        arg_size = 0;
        ++local_scope;
        if (l) {
            for(;;) {
                /* read param name and compute offset */
                if (l != FUNC_OLD) {
                    /* "(void)" means: no parameters */
                    if ((pt.t & VT_BTYPE) == VT_VOID && tok == ')')
                        break;
                    type_decl(&pt, &ad1, &n, TYPE_DIRECT | TYPE_ABSTRACT | TYPE_PARAM);
                    if ((pt.t & VT_BTYPE) == VT_VOID)
                        tcc_error("parameter declared as void");
                    if (n == 0)
                        n = SYM_FIELD;   /* unnamed parameter */
                } else {
                    n = tok;
                    pt.t = VT_VOID; /* invalid type */
                    pt.ref = NULL;
                    next();
                }
                if (n < TOK_UIDENT)
                    expect("identifier");
                convert_parameter_type(&pt);
                /* argument size in stack words, rounded up */
                arg_size += (type_size(&pt, &align) + PTR_SIZE - 1) / PTR_SIZE;
                /* these symbols may be evaluated for VLArrays (see below, under
                   nocode_wanted) which is why we push them here as normal symbols
                   temporarily.  Example: int func(int a, int b[++a]); */
                s = sym_push(n, &pt, VT_LOCAL|VT_LVAL, 0);
                *plast = s;
                plast = &s->next;
                if (tok == ')')
                    break;
                skip(',');
                if (l == FUNC_NEW && tok == TOK_DOTS) {
                    l = FUNC_ELLIPSIS;
                    next();
                    break;
                }
                if (l == FUNC_NEW && !parse_btype(&pt, &ad1, 0))
                    tcc_error("invalid type");
            }
        } else
            /* if no parameters, then old type prototype */
            l = FUNC_OLD;
        skip(')');
        /* remove parameter symbols from token table, keep on stack */
        if (first) {
            sym_pop(local_stack ? &local_stack : &global_stack, first->prev, 1);
            for (s = first; s; s = s->next)
                s->v |= SYM_FIELD;
        }
        --local_scope;
        /* NOTE: const is ignored in returned type as it has a special
           meaning in gcc / C++ */
        type->t &= ~VT_CONSTANT;
        /* some ancient pre-K&R C allows a function to return an array
           and the array brackets to be put after the arguments, such
           that "int c()[]" means something like "int[] c()" */
        if (tok == '[') {
            next();
            skip(']'); /* only handle simple "[]" */
            mk_pointer(type);
        }
        /* we push a anonymous symbol which will contain the function prototype */
        ad->f.func_args = arg_size;
        ad->f.func_type = l;
        s = sym_push(SYM_FIELD, type, 0, 0);
        s->a = ad->a;
        s->f = ad->f;
        s->next = first;
        type->t = VT_FUNC;
        type->ref = s;
    } else if (tok == '[') {
        int saved_nocode_wanted = nocode_wanted;
        /* array definition */
        next();
        n = -1;      /* -1 = no size expression parsed */
        t1 = 0;      /* VT_VLA when the size is not a constant */
        if (td & TYPE_PARAM) while (1) {
            /* XXX The optional type-quals and static should only be accepted
               in parameter decls.  The '*' as well, and then even only
               in prototypes (not function defs).  */
            switch (tok) {
            case TOK_RESTRICT1: case TOK_RESTRICT2: case TOK_RESTRICT3:
            case TOK_CONST1:
            case TOK_VOLATILE1:
            case TOK_STATIC:
            case '*':
                next();
                continue;
            default:
                break;
            }
            if (tok != ']') {
                /* Code generation is not done now but has to be done
                   at start of function. Save code here for later use. */
                nocode_wanted = 1;
                skip_or_save_block(&vla_array_tok);
                unget_tok(0);
                vla_array_str = vla_array_tok->str;
                begin_macro(vla_array_tok, 2);
                next();
                gexpr();
                end_macro();
                next();
                goto check;
            }
            break;

        } else if (tok != ']') {
            if (!local_stack || (storage & VT_STATIC))
                vpushi(expr_const());
            else {
                /* VLAs (which can only happen with local_stack && !VT_STATIC)
                   length must always be evaluated, even under nocode_wanted,
                   so that its size slot is initialized (e.g. under sizeof
                   or typeof). */
                nocode_wanted = 0;
                gexpr();
            }
        check:
            /* constant size expression -> plain array, else VLA */
            if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
                n = vtop->c.i;
                if (n < 0)
                    tcc_error("invalid array size");
            } else {
                if (!is_integer_btype(vtop->type.t & VT_BTYPE))
                    tcc_error("size of variable length array should be an integer");
                n = 0;
                t1 = VT_VLA;
            }
        }
        skip(']');
        /* parse next post type */
        post_type(type, ad, storage, (td & ~(TYPE_DIRECT|TYPE_ABSTRACT)) | TYPE_NEST);

        if ((type->t & VT_BTYPE) == VT_FUNC)
            tcc_error("declaration of an array of functions");
        if ((type->t & VT_BTYPE) == VT_VOID
            || type_size(type, &align) < 0)
            tcc_error("declaration of an array of incomplete type elements");

        /* inner dimensions may already be VLA: propagate the flag */
        t1 |= type->t & VT_VLA;

        if (t1 & VT_VLA) {
            if (n < 0) {
                if (td & TYPE_NEST)
                    tcc_error("need explicit inner array size in VLAs");
            }
            else {
                /* reserve a local slot holding the runtime size of this
                   VLA level and store element_size * n into it */
                loc -= type_size(&int_type, &align);
                loc &= -align;
                n = loc;

                vpush_type_size(type, &align);
                gen_op('*');
                vset(&int_type, VT_LOCAL|VT_LVAL, n);
                vswap();
                vstore();
            }
        }
        if (n != -1)
            vpop();
        nocode_wanted = saved_nocode_wanted;

        /* we push an anonymous symbol which will contain the array
           element type */
        s = sym_push(SYM_FIELD, type, 0, n);
        type->t = (t1 ? VT_VLA : VT_ARRAY) | VT_PTR;
        type->ref = s;

        if (vla_array_str) {
            /* keep the saved size tokens only if this really is a VLA */
            if (t1 & VT_VLA)
                s->vla_array_str = vla_array_str;
            else
                tok_str_free_str(vla_array_str);
        }
    }
    return 1;
}
|
|
|
|
|
2017-03-06 10:25:33 +08:00
|
|
|
/* Parse a type declarator (except basic type), and return the type
|
2009-05-06 02:18:10 +08:00
|
|
|
in 'type'. 'td' is a bitmask indicating which kind of type decl is
|
|
|
|
expected. 'type' should contain the basic type. 'ad' is the
|
|
|
|
attribute definition of the basic type. It can be modified by
|
2017-03-06 10:25:33 +08:00
|
|
|
type_decl(). If this (possibly abstract) declarator is a pointer chain
|
|
|
|
it returns the innermost pointed to type (equals *type, but is a different
|
|
|
|
pointer), otherwise returns type itself, that's used for recursive calls. */
|
2021-10-22 13:39:54 +08:00
|
|
|
static CType *type_decl(CType *type, AttributeDef *ad, int *v, int td)
{
    CType *post, *ret;
    int qualifiers, storage;

    /* recursive type, remove storage bits first, apply them later again */
    storage = type->t & VT_STORAGE;
    type->t &= ~VT_STORAGE;
    post = ret = type;

    /* pointer derivations with optional qualifiers/attributes */
    while (tok == '*') {
        qualifiers = 0;
    redo:
        next();
        switch(tok) {
        case TOK__Atomic:
            qualifiers |= VT_ATOMIC;
            goto redo;
        case TOK_CONST1:
        case TOK_CONST2:
        case TOK_CONST3:
            qualifiers |= VT_CONSTANT;
            goto redo;
        case TOK_VOLATILE1:
        case TOK_VOLATILE2:
        case TOK_VOLATILE3:
            qualifiers |= VT_VOLATILE;
            goto redo;
        case TOK_RESTRICT1:
        case TOK_RESTRICT2:
        case TOK_RESTRICT3:
            /* restrict is accepted but ignored */
            goto redo;
        /* XXX: clarify attribute handling */
        case TOK_ATTRIBUTE1:
        case TOK_ATTRIBUTE2:
            parse_attribute(ad);
            break;
        }
        mk_pointer(type);
        type->t |= qualifiers;
        if (ret == type)
            /* innermost pointed to type is the one for the first derivation */
            ret = pointed_type(type);
    }

    if (tok == '(') {
        /* This is possibly a parameter type list for abstract declarators
           ('int ()'), use post_type for testing this.  */
        if (!post_type(type, ad, 0, td)) {
            /* It's not, so it's a nested declarator, and the post operations
               apply to the innermost pointed to type (if any).  */
            /* XXX: this is not correct to modify 'ad' at this point, but
               the syntax is not clear */
            parse_attribute(ad);
            post = type_decl(type, ad, v, td);
            skip(')');
        } else
            goto abstract;
    } else if (tok >= TOK_IDENT && (td & TYPE_DIRECT)) {
        /* type identifier */
        *v = tok;
        next();
    } else {
    abstract:
        if (!(td & TYPE_ABSTRACT))
            expect("identifier");
        *v = 0;   /* anonymous/abstract declarator */
    }
    /* array/function suffixes apply to the innermost declarator; the
       storage class is only passed through for a non-nested declarator */
    post_type(post, ad, post != ret ? 0 : storage,
              td & ~(TYPE_DIRECT|TYPE_ABSTRACT));
    parse_attribute(ad);
    type->t |= storage;
    return ret;
}
|
|
|
|
|
|
|
|
/* indirection with full error checking and bound check */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* indirection with full error checking and bound check */
ST_FUNC void indir(void)
{
    if ((vtop->type.t & VT_BTYPE) != VT_PTR) {
        /* dereferencing a function designator is a no-op */
        if ((vtop->type.t & VT_BTYPE) == VT_FUNC)
            return;
        expect("pointer");
    }
    /* load the pointer value itself before retyping the stack slot */
    if (vtop->r & VT_LVAL)
        gv(RC_INT);
    vtop->type = *pointed_type(&vtop->type);
    /* Arrays and functions are never lvalues */
    if (!(vtop->type.t & (VT_ARRAY | VT_VLA))
        && (vtop->type.t & VT_BTYPE) != VT_FUNC) {
        vtop->r |= VT_LVAL;
        /* if bound checking, the referenced pointer must be checked */
#ifdef CONFIG_TCC_BCHECK
        if (tcc_state->do_bounds_check)
            vtop->r |= VT_MUSTBOUND;
#endif
    }
}
|
|
|
|
|
|
|
|
/* pass a parameter to a function and do type checking and casting */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* pass a parameter to a function and do type checking and casting.
   'func' is the function prototype symbol, 'arg' the declared parameter
   for the value currently on top of the value stack (NULL when past the
   declared parameters, e.g. in the variadic part). */
static void gfunc_param_typed(Sym *func, Sym *arg)
{
    int func_type;
    CType type;

    func_type = func->f.func_type;
    if (func_type == FUNC_OLD ||
        (func_type == FUNC_ELLIPSIS && arg == NULL)) {
        /* default casting : only need to convert float to double */
        if ((vtop->type.t & VT_BTYPE) == VT_FLOAT) {
            gen_cast_s(VT_DOUBLE);
        } else if (vtop->type.t & VT_BITFIELD) {
            /* promote a bit-field value to its underlying type */
            type.t = vtop->type.t & (VT_BTYPE | VT_UNSIGNED);
            type.ref = vtop->type.ref;
            gen_cast(&type);
        } else if (vtop->r & VT_MUSTCAST) {
            /* pending char/short truncation must be materialized */
            force_charshort_cast();
        }
    } else if (arg == NULL) {
        tcc_error("too many arguments to function");
    } else {
        type = arg->type;
        type.t &= ~VT_CONSTANT; /* need to do that to avoid false warning */
        gen_assign_cast(&type);
    }
}
|
|
|
|
|
2017-07-21 04:21:27 +08:00
|
|
|
/* parse an expression and return its type without any side effect. */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* parse an expression and return its type without any side effect. */
static void expr_type(CType *type, void (*expr_fn)(void))
{
    /* disable code generation while the expression is evaluated */
    nocode_wanted++;
    expr_fn();
    *type = vtop->type;
    vpop();
    nocode_wanted--;
}
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* parse an expression of the form '(type)' or '(expr)' and return its
|
|
|
|
type */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Parse a parenthesized operand of the form '(type)' or '(expr)' —
   as used by typeof/_Atomic — and return its type in 'type'. */
static void parse_expr_type(CType *type)
{
    int n;
    AttributeDef ad;

    skip('(');
    if (!parse_btype(type, &ad, 0))
        /* not a type name: evaluate the expression for its type only */
        expr_type(type, gexpr);
    else
        /* type name: parse the (abstract) declarator part */
        type_decl(type, &ad, &n, TYPE_ABSTRACT);
    skip(')');
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Parse a complete (abstract) type name into 'type'; errors out when
   the current token does not start a type. */
static void parse_type(CType *type)
{
    int n;
    AttributeDef ad;

    if (!parse_btype(type, &ad, 0))
        expect("type");
    /* no identifier is allowed here, only an abstract declarator */
    type_decl(type, &ad, &n, TYPE_ABSTRACT);
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void parse_builtin_params(int nc, const char *args)
|
2017-02-27 09:22:28 +08:00
|
|
|
{
|
|
|
|
char c, sep = '(';
|
2020-07-05 20:01:50 +08:00
|
|
|
CType type;
|
2017-02-27 09:22:28 +08:00
|
|
|
if (nc)
|
2021-10-22 13:39:54 +08:00
|
|
|
nocode_wanted++;
|
|
|
|
next();
|
2020-07-05 20:01:50 +08:00
|
|
|
if (*args == 0)
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(sep);
|
2017-02-27 09:22:28 +08:00
|
|
|
while ((c = *args++)) {
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(sep);
|
2017-02-27 09:22:28 +08:00
|
|
|
sep = ',';
|
2020-07-06 06:00:42 +08:00
|
|
|
if (c == 't') {
|
2021-10-22 13:39:54 +08:00
|
|
|
parse_type(&type);
|
|
|
|
vpush(&type);
|
2020-07-06 06:00:42 +08:00
|
|
|
continue;
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
expr_eq();
|
2020-07-06 06:00:42 +08:00
|
|
|
type.ref = NULL;
|
|
|
|
type.t = 0;
|
2017-02-27 09:22:28 +08:00
|
|
|
switch (c) {
|
2020-07-06 06:00:42 +08:00
|
|
|
case 'e':
|
|
|
|
continue;
|
|
|
|
case 'V':
|
|
|
|
type.t = VT_CONSTANT;
|
2020-07-05 20:01:50 +08:00
|
|
|
case 'v':
|
2020-07-06 06:00:42 +08:00
|
|
|
type.t |= VT_VOID;
|
2021-10-22 13:39:54 +08:00
|
|
|
mk_pointer (&type);
|
2020-07-06 06:00:42 +08:00
|
|
|
break;
|
|
|
|
case 'S':
|
|
|
|
type.t = VT_CONSTANT;
|
2020-07-05 20:01:50 +08:00
|
|
|
case 's':
|
2021-10-22 13:39:54 +08:00
|
|
|
type.t |= char_type.t;
|
|
|
|
mk_pointer (&type);
|
2020-07-06 06:00:42 +08:00
|
|
|
break;
|
|
|
|
case 'i':
|
|
|
|
type.t = VT_INT;
|
|
|
|
break;
|
|
|
|
case 'l':
|
|
|
|
type.t = VT_SIZE_T;
|
|
|
|
break;
|
|
|
|
default:
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
break;
|
2017-02-27 09:22:28 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_assign_cast(&type);
|
2017-02-27 09:22:28 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(')');
|
2017-02-27 09:22:28 +08:00
|
|
|
if (nc)
|
2021-10-22 13:39:54 +08:00
|
|
|
nocode_wanted--;
|
2017-02-27 09:22:28 +08:00
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void parse_atomic(int atok)
|
2021-01-26 21:29:07 +08:00
|
|
|
{
|
2022-10-17 00:51:56 +08:00
|
|
|
int size, align, arg, t, save = 0;
|
2021-03-30 15:26:26 +08:00
|
|
|
CType *atom, *atom_ptr, ct = {0};
|
2022-10-17 00:51:56 +08:00
|
|
|
SValue store;
|
2021-03-30 15:26:26 +08:00
|
|
|
char buf[40];
|
2021-03-17 05:29:45 +08:00
|
|
|
static const char *const templates[] = {
|
2021-01-27 02:24:58 +08:00
|
|
|
/*
|
2021-02-15 02:41:59 +08:00
|
|
|
* Each entry consists of callback and function template.
|
|
|
|
* The template represents argument types and return type.
|
|
|
|
*
|
|
|
|
* ? void (return-only)
|
|
|
|
* b bool
|
|
|
|
* a atomic
|
|
|
|
* A read-only atomic
|
|
|
|
* p pointer to memory
|
|
|
|
* v value
|
2022-10-17 00:51:56 +08:00
|
|
|
* l load pointer
|
|
|
|
* s save pointer
|
2021-02-15 02:41:59 +08:00
|
|
|
* m memory model
|
2021-01-27 02:24:58 +08:00
|
|
|
*/
|
2021-03-30 15:26:26 +08:00
|
|
|
|
|
|
|
/* keep in order of appearance in tcctok.h: */
|
2022-10-17 00:51:56 +08:00
|
|
|
/* __atomic_store */ "alm.?",
|
|
|
|
/* __atomic_load */ "Asm.v",
|
|
|
|
/* __atomic_exchange */ "alsm.v",
|
|
|
|
/* __atomic_compare_exchange */ "aplbmm.b",
|
2021-03-30 15:26:26 +08:00
|
|
|
/* __atomic_fetch_add */ "avm.v",
|
|
|
|
/* __atomic_fetch_sub */ "avm.v",
|
|
|
|
/* __atomic_fetch_or */ "avm.v",
|
|
|
|
/* __atomic_fetch_xor */ "avm.v",
|
2022-10-17 00:51:56 +08:00
|
|
|
/* __atomic_fetch_and */ "avm.v",
|
|
|
|
/* __atomic_fetch_nand */ "avm.v",
|
|
|
|
/* __atomic_and_fetch */ "avm.v",
|
|
|
|
/* __atomic_sub_fetch */ "avm.v",
|
|
|
|
/* __atomic_or_fetch */ "avm.v",
|
|
|
|
/* __atomic_xor_fetch */ "avm.v",
|
|
|
|
/* __atomic_and_fetch */ "avm.v",
|
|
|
|
/* __atomic_nand_fetch */ "avm.v"
|
2021-01-27 02:24:58 +08:00
|
|
|
};
|
2021-03-30 15:26:26 +08:00
|
|
|
const char *template = templates[(atok - TOK___atomic_store)];
|
2021-01-26 21:29:07 +08:00
|
|
|
|
2021-03-30 15:26:26 +08:00
|
|
|
atom = atom_ptr = NULL;
|
|
|
|
size = 0; /* pacify compiler */
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
|
|
|
skip('(');
|
2021-03-30 15:26:26 +08:00
|
|
|
for (arg = 0;;) {
|
2021-10-22 13:39:54 +08:00
|
|
|
expr_eq();
|
2021-02-15 02:41:59 +08:00
|
|
|
switch (template[arg]) {
|
2021-01-26 21:29:07 +08:00
|
|
|
case 'a':
|
|
|
|
case 'A':
|
2021-10-22 13:39:54 +08:00
|
|
|
atom_ptr = &vtop->type;
|
2021-03-30 15:26:26 +08:00
|
|
|
if ((atom_ptr->t & VT_BTYPE) != VT_PTR)
|
2021-10-22 13:39:54 +08:00
|
|
|
expect("pointer");
|
2021-03-30 15:26:26 +08:00
|
|
|
atom = pointed_type(atom_ptr);
|
|
|
|
size = type_size(atom, &align);
|
|
|
|
if (size > 8
|
|
|
|
|| (size & (size - 1))
|
|
|
|
|| (atok > TOK___atomic_compare_exchange
|
|
|
|
&& (0 == btype_size(atom->t & VT_BTYPE)
|
|
|
|
|| (atom->t & VT_BTYPE) == VT_PTR)))
|
2021-10-22 13:39:54 +08:00
|
|
|
expect("integral or integer-sized pointer target type");
|
2021-03-30 15:26:26 +08:00
|
|
|
/* GCC does not care either: */
|
|
|
|
/* if (!(atom->t & VT_ATOMIC))
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_warning("pointer target declaration is missing '_Atomic'"); */
|
2021-01-26 21:29:07 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 'p':
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((vtop->type.t & VT_BTYPE) != VT_PTR
|
|
|
|
|| type_size(pointed_type(&vtop->type), &align) != size)
|
|
|
|
tcc_error("pointer target type mismatch in argument %d", arg + 1);
|
|
|
|
gen_assign_cast(atom_ptr);
|
2021-01-26 21:29:07 +08:00
|
|
|
break;
|
|
|
|
case 'v':
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_assign_cast(atom);
|
2021-01-26 21:29:07 +08:00
|
|
|
break;
|
2022-10-17 00:51:56 +08:00
|
|
|
case 'l':
|
|
|
|
indir();
|
|
|
|
gen_assign_cast(atom);
|
|
|
|
break;
|
|
|
|
case 's':
|
|
|
|
save = 1;
|
|
|
|
indir();
|
|
|
|
store = *vtop;
|
|
|
|
vpop();
|
|
|
|
break;
|
2021-01-26 21:29:07 +08:00
|
|
|
case 'm':
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_assign_cast(&int_type);
|
2021-03-30 15:26:26 +08:00
|
|
|
break;
|
|
|
|
case 'b':
|
|
|
|
ct.t = VT_BOOL;
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_assign_cast(&ct);
|
2021-01-26 21:29:07 +08:00
|
|
|
break;
|
|
|
|
}
|
2021-03-30 15:26:26 +08:00
|
|
|
if ('.' == template[++arg])
|
2021-01-26 21:29:07 +08:00
|
|
|
break;
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(',');
|
2021-01-26 21:29:07 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(')');
|
2021-01-26 21:29:07 +08:00
|
|
|
|
2021-03-30 15:26:26 +08:00
|
|
|
ct.t = VT_VOID;
|
|
|
|
switch (template[arg + 1]) {
|
stdatomic: ld/st/xchg/cmpxchg on simple types
Some complex types can still be small and simple enough to fit into
register. Other compilers allow some operations on these types, and it
seems to be quite a reasonable choice. From now on, we should be able
to compile the following artificial example:
struct combo {
uint16_t lo;
uint16_t hi;
};
struct combo load(const _Atomic(struct combo) *atom)
{
return atomic_load(atom);
}
void store(_Atomic(struct combo) *atom, struct combo value)
{
atomic_store(atom, value);
}
struct combo xchg(_Atomic(struct combo) *atom, struct combo value)
{
return atomic_exchange(atom, value);
}
bool cmpxchg(_Atomic(struct combo) *atom,
struct combo *cmp, struct combo xchg)
{
return atomic_compare_exchange_strong(atom, cmp, xchg);
}
This might be useful for some corner cases, though it is quite likely
that many programmers will prefer operating on a single 32-bit value
instead of using the structure consisting of 16-bit pair.
Things will work as long as the overall structure size happens to be
the same as for any integer type we support in atomics.
2021-03-19 05:54:24 +08:00
|
|
|
case 'b':
|
2021-03-30 15:26:26 +08:00
|
|
|
ct.t = VT_BOOL;
|
stdatomic: ld/st/xchg/cmpxchg on simple types
Some complex types can still be small and simple enough to fit into
register. Other compilers allow some operations on these types, and it
seems to be quite a reasonable choice. From now on, we should be able
to compile the following artificial example:
struct combo {
uint16_t lo;
uint16_t hi;
};
struct combo load(const _Atomic(struct combo) *atom)
{
return atomic_load(atom);
}
void store(_Atomic(struct combo) *atom, struct combo value)
{
atomic_store(atom, value);
}
struct combo xchg(_Atomic(struct combo) *atom, struct combo value)
{
return atomic_exchange(atom, value);
}
bool cmpxchg(_Atomic(struct combo) *atom,
struct combo *cmp, struct combo xchg)
{
return atomic_compare_exchange_strong(atom, cmp, xchg);
}
This might be useful for some corner cases, though it is quite likely
that many programmers will prefer operating on a single 32-bit value
instead of using the structure consisting of 16-bit pair.
Things will work as long as the overall structure size happens to be
the same as for any integer type we support in atomics.
2021-03-19 05:54:24 +08:00
|
|
|
break;
|
|
|
|
case 'v':
|
2021-03-30 15:26:26 +08:00
|
|
|
ct = *atom;
|
stdatomic: ld/st/xchg/cmpxchg on simple types
Some complex types can still be small and simple enough to fit into
register. Other compilers allow some operations on these types, and it
seems to be quite a reasonable choice. From now on, we should be able
to compile the following artificial example:
struct combo {
uint16_t lo;
uint16_t hi;
};
struct combo load(const _Atomic(struct combo) *atom)
{
return atomic_load(atom);
}
void store(_Atomic(struct combo) *atom, struct combo value)
{
atomic_store(atom, value);
}
struct combo xchg(_Atomic(struct combo) *atom, struct combo value)
{
return atomic_exchange(atom, value);
}
bool cmpxchg(_Atomic(struct combo) *atom,
struct combo *cmp, struct combo xchg)
{
return atomic_compare_exchange_strong(atom, cmp, xchg);
}
This might be useful for some corner cases, though it is quite likely
that many programmers will prefer operating on a single 32-bit value
instead of using the structure consisting of 16-bit pair.
Things will work as long as the overall structure size happens to be
the same as for any integer type we support in atomics.
2021-03-19 05:54:24 +08:00
|
|
|
break;
|
2021-03-30 15:26:26 +08:00
|
|
|
}
|
stdatomic: ld/st/xchg/cmpxchg on simple types
Some complex types can still be small and simple enough to fit into
register. Other compilers allow some operations on these types, and it
seems to be quite a reasonable choice. From now on, we should be able
to compile the following artificial example:
struct combo {
uint16_t lo;
uint16_t hi;
};
struct combo load(const _Atomic(struct combo) *atom)
{
return atomic_load(atom);
}
void store(_Atomic(struct combo) *atom, struct combo value)
{
atomic_store(atom, value);
}
struct combo xchg(_Atomic(struct combo) *atom, struct combo value)
{
return atomic_exchange(atom, value);
}
bool cmpxchg(_Atomic(struct combo) *atom,
struct combo *cmp, struct combo xchg)
{
return atomic_compare_exchange_strong(atom, cmp, xchg);
}
This might be useful for some corner cases, though it is quite likely
that many programmers will prefer operating on a single 32-bit value
instead of using the structure consisting of 16-bit pair.
Things will work as long as the overall structure size happens to be
the same as for any integer type we support in atomics.
2021-03-19 05:54:24 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
sprintf(buf, "%s_%d", get_tok_str(atok, 0), size);
|
|
|
|
vpush_helper_func(tok_alloc_const(buf));
|
2022-10-17 00:51:56 +08:00
|
|
|
vrott(arg - save + 1);
|
|
|
|
gfunc_call(arg - save);
|
stdatomic: ld/st/xchg/cmpxchg on simple types
Some complex types can still be small and simple enough to fit into
register. Other compilers allow some operations on these types, and it
seems to be quite a reasonable choice. From now on, we should be able
to compile the following artificial example:
struct combo {
uint16_t lo;
uint16_t hi;
};
struct combo load(const _Atomic(struct combo) *atom)
{
return atomic_load(atom);
}
void store(_Atomic(struct combo) *atom, struct combo value)
{
atomic_store(atom, value);
}
struct combo xchg(_Atomic(struct combo) *atom, struct combo value)
{
return atomic_exchange(atom, value);
}
bool cmpxchg(_Atomic(struct combo) *atom,
struct combo *cmp, struct combo xchg)
{
return atomic_compare_exchange_strong(atom, cmp, xchg);
}
This might be useful for some corner cases, though it is quite likely
that many programmers will prefer operating on a single 32-bit value
instead of using the structure consisting of 16-bit pair.
Things will work as long as the overall structure size happens to be
the same as for any integer type we support in atomics.
2021-03-19 05:54:24 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
vpush(&ct);
|
|
|
|
PUT_R_RET(vtop, ct.t);
|
2022-10-17 00:51:56 +08:00
|
|
|
t = ct.t & VT_BTYPE;
|
|
|
|
if (t == VT_BYTE || t == VT_SHORT || t == VT_BOOL) {
|
2021-04-14 17:59:57 +08:00
|
|
|
#ifdef PROMOTE_RET
|
2022-10-17 00:51:56 +08:00
|
|
|
vtop->r |= BFVAL(VT_MUSTCAST, 1);
|
2021-04-14 17:59:57 +08:00
|
|
|
#else
|
2022-10-17 00:51:56 +08:00
|
|
|
vtop->type.t = VT_INT;
|
2021-04-14 17:59:57 +08:00
|
|
|
#endif
|
|
|
|
}
|
2022-10-17 00:51:56 +08:00
|
|
|
gen_cast(&ct);
|
|
|
|
if (save) {
|
|
|
|
vpush(&ct);
|
|
|
|
*vtop = store;
|
|
|
|
vswap();
|
|
|
|
vstore();
|
|
|
|
}
|
2021-01-26 21:29:07 +08:00
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC void unary(void)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
2010-04-11 07:53:40 +08:00
|
|
|
int n, t, align, size, r, sizeof_caller;
|
2009-05-06 02:18:10 +08:00
|
|
|
CType type;
|
|
|
|
Sym *s;
|
|
|
|
AttributeDef ad;
|
|
|
|
|
2020-01-18 05:58:39 +08:00
|
|
|
/* generate line number info */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (debug_modes)
|
2022-05-09 23:02:09 +08:00
|
|
|
tcc_debug_line(tcc_state), tcc_tcov_check_line (tcc_state, 1);
|
2020-01-18 05:58:39 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
sizeof_caller = in_sizeof;
|
|
|
|
in_sizeof = 0;
|
2017-07-09 18:34:11 +08:00
|
|
|
type.ref = NULL;
|
2009-05-06 02:18:10 +08:00
|
|
|
/* XXX: GCC 2.95.3 does not generate a table although it should be
|
|
|
|
better here */
|
|
|
|
tok_next:
|
2021-10-22 13:39:54 +08:00
|
|
|
switch(tok) {
|
2009-05-06 02:18:10 +08:00
|
|
|
case TOK_EXTENSION:
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
2009-05-06 02:18:10 +08:00
|
|
|
goto tok_next;
|
2017-09-25 00:57:48 +08:00
|
|
|
case TOK_LCHAR:
|
|
|
|
#ifdef TCC_TARGET_PE
|
|
|
|
t = VT_SHORT|VT_UNSIGNED;
|
|
|
|
goto push_tokc;
|
|
|
|
#endif
|
2009-05-06 02:18:10 +08:00
|
|
|
case TOK_CINT:
|
2015-07-30 04:53:57 +08:00
|
|
|
case TOK_CCHAR:
|
2017-03-07 04:45:41 +08:00
|
|
|
t = VT_INT;
|
|
|
|
push_tokc:
|
|
|
|
type.t = t;
|
2021-10-22 13:39:54 +08:00
|
|
|
vsetc(&type, VT_CONST, &tokc);
|
|
|
|
next();
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
|
|
|
case TOK_CUINT:
|
2017-03-07 04:45:41 +08:00
|
|
|
t = VT_INT | VT_UNSIGNED;
|
|
|
|
goto push_tokc;
|
2009-05-06 02:18:10 +08:00
|
|
|
case TOK_CLLONG:
|
2017-03-07 04:45:41 +08:00
|
|
|
t = VT_LLONG;
|
|
|
|
goto push_tokc;
|
2009-05-06 02:18:10 +08:00
|
|
|
case TOK_CULLONG:
|
2017-03-12 12:25:09 +08:00
|
|
|
t = VT_LLONG | VT_UNSIGNED;
|
2017-03-07 04:45:41 +08:00
|
|
|
goto push_tokc;
|
2009-05-06 02:18:10 +08:00
|
|
|
case TOK_CFLOAT:
|
2017-03-07 04:45:41 +08:00
|
|
|
t = VT_FLOAT;
|
|
|
|
goto push_tokc;
|
2009-05-06 02:18:10 +08:00
|
|
|
case TOK_CDOUBLE:
|
2017-03-07 04:45:41 +08:00
|
|
|
t = VT_DOUBLE;
|
|
|
|
goto push_tokc;
|
2009-05-06 02:18:10 +08:00
|
|
|
case TOK_CLDOUBLE:
|
2017-03-07 04:45:41 +08:00
|
|
|
t = VT_LDOUBLE;
|
|
|
|
goto push_tokc;
|
mutiples fix for _Generic
* check that _Generic don't match unsigned char * with char *
this case is usefull as with -funsigned-char, 'char *' are unsigned
* change VT_LONG so it's now a qualifier
VT_LONG are never use for code generation, but only durring parsing state,
in _Generic we need to be able to make diference between
'long' and 'long long'
So VT_LONG is now use as a type qualifier, it's old behaviour is still
here, but we can keep trace of what was a long and what wasn't
* add TOK_CLONG and TOK_CULONG
tcc was directly converting value like '7171L' into TOK_CLLONG or
TOK_CINT depending of the machine architecture.
because of that, we was unable to make diference between a long and a
long long, which doesn't work with _Generic.
So now 7171L is a TOK_CLONG, and we can handle _Generic properly
* check that _Generic can make diference between long and long long
* uncomment "type match twice" as it should now pass tests on any platforms
* add inside_generic global
the point of this variable is to use VT_LONG in comparaison only
when we are evaluating a _Generic.
problem is with my lastest patchs tcc can now make the diference between
a 'long long' and a 'long', but in 64 bit stddef.h typedef uint64_t as
typedef signed long long int int64_t and stdint.h as unsigned long int, so tcc
break when stdint.h and stddef.h are include together.
Another solution woud be to modifie include/stddef.h so it define uint64_t as
unsigned long int when processor is 64 bit, but this could break some
legacy code, so for now, VT_LONG are use only inside generc.
* check that _Generic parse first argument correctly
* check that _Generic evaluate correctly exresion like "f() / 2"
2017-07-10 23:44:53 +08:00
|
|
|
case TOK_CLONG:
|
2017-09-25 00:57:48 +08:00
|
|
|
t = (LONG_SIZE == 8 ? VT_LLONG : VT_INT) | VT_LONG;
|
|
|
|
goto push_tokc;
|
mutiples fix for _Generic
* check that _Generic don't match unsigned char * with char *
this case is usefull as with -funsigned-char, 'char *' are unsigned
* change VT_LONG so it's now a qualifier
VT_LONG are never use for code generation, but only durring parsing state,
in _Generic we need to be able to make diference between
'long' and 'long long'
So VT_LONG is now use as a type qualifier, it's old behaviour is still
here, but we can keep trace of what was a long and what wasn't
* add TOK_CLONG and TOK_CULONG
tcc was directly converting value like '7171L' into TOK_CLLONG or
TOK_CINT depending of the machine architecture.
because of that, we was unable to make diference between a long and a
long long, which doesn't work with _Generic.
So now 7171L is a TOK_CLONG, and we can handle _Generic properly
* check that _Generic can make diference between long and long long
* uncomment "type match twice" as it should now pass tests on any platforms
* add inside_generic global
the point of this variable is to use VT_LONG in comparaison only
when we are evaluating a _Generic.
problem is with my lastest patchs tcc can now make the diference between
a 'long long' and a 'long', but in 64 bit stddef.h typedef uint64_t as
typedef signed long long int int64_t and stdint.h as unsigned long int, so tcc
break when stdint.h and stddef.h are include together.
Another solution woud be to modifie include/stddef.h so it define uint64_t as
unsigned long int when processor is 64 bit, but this could break some
legacy code, so for now, VT_LONG are use only inside generc.
* check that _Generic parse first argument correctly
* check that _Generic evaluate correctly exresion like "f() / 2"
2017-07-10 23:44:53 +08:00
|
|
|
case TOK_CULONG:
|
2017-09-25 00:57:48 +08:00
|
|
|
t = (LONG_SIZE == 8 ? VT_LLONG : VT_INT) | VT_LONG | VT_UNSIGNED;
|
mutiples fix for _Generic
* check that _Generic don't match unsigned char * with char *
this case is usefull as with -funsigned-char, 'char *' are unsigned
* change VT_LONG so it's now a qualifier
VT_LONG are never use for code generation, but only durring parsing state,
in _Generic we need to be able to make diference between
'long' and 'long long'
So VT_LONG is now use as a type qualifier, it's old behaviour is still
here, but we can keep trace of what was a long and what wasn't
* add TOK_CLONG and TOK_CULONG
tcc was directly converting value like '7171L' into TOK_CLLONG or
TOK_CINT depending of the machine architecture.
because of that, we was unable to make diference between a long and a
long long, which doesn't work with _Generic.
So now 7171L is a TOK_CLONG, and we can handle _Generic properly
* check that _Generic can make diference between long and long long
* uncomment "type match twice" as it should now pass tests on any platforms
* add inside_generic global
the point of this variable is to use VT_LONG in comparaison only
when we are evaluating a _Generic.
problem is with my lastest patchs tcc can now make the diference between
a 'long long' and a 'long', but in 64 bit stddef.h typedef uint64_t as
typedef signed long long int int64_t and stdint.h as unsigned long int, so tcc
break when stdint.h and stddef.h are include together.
Another solution woud be to modifie include/stddef.h so it define uint64_t as
unsigned long int when processor is 64 bit, but this could break some
legacy code, so for now, VT_LONG are use only inside generc.
* check that _Generic parse first argument correctly
* check that _Generic evaluate correctly exresion like "f() / 2"
2017-07-10 23:44:53 +08:00
|
|
|
goto push_tokc;
|
2009-05-06 02:18:10 +08:00
|
|
|
case TOK___FUNCTION__:
|
|
|
|
if (!gnu_ext)
|
|
|
|
goto tok_identifier;
|
|
|
|
/* fall thru */
|
|
|
|
case TOK___FUNC__:
|
|
|
|
{
|
2021-02-01 22:10:58 +08:00
|
|
|
Section *sec;
|
2009-05-06 02:18:10 +08:00
|
|
|
int len;
|
|
|
|
/* special function name identifier */
|
2021-10-22 13:39:54 +08:00
|
|
|
len = strlen(funcname) + 1;
|
2009-05-06 02:18:10 +08:00
|
|
|
/* generate char[len] type */
|
2021-10-22 13:39:54 +08:00
|
|
|
type.t = char_type.t;
|
|
|
|
if (tcc_state->warn_write_strings & WARN_ON)
|
2021-02-01 22:10:58 +08:00
|
|
|
type.t |= VT_CONSTANT;
|
2021-10-22 13:39:54 +08:00
|
|
|
mk_pointer(&type);
|
2009-05-06 02:18:10 +08:00
|
|
|
type.t |= VT_ARRAY;
|
|
|
|
type.ref->c = len;
|
2021-02-01 22:10:58 +08:00
|
|
|
sec = rodata_section;
|
2022-05-29 02:50:09 +08:00
|
|
|
vpush_ref(&type, sec, sec->data_offset, len);
|
2021-08-01 02:44:51 +08:00
|
|
|
if (!NODATA_WANTED)
|
2021-10-22 13:39:54 +08:00
|
|
|
memcpy(section_ptr_add(sec, len), funcname, len);
|
|
|
|
next();
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case TOK_LSTR:
|
|
|
|
#ifdef TCC_TARGET_PE
|
|
|
|
t = VT_SHORT | VT_UNSIGNED;
|
|
|
|
#else
|
|
|
|
t = VT_INT;
|
|
|
|
#endif
|
|
|
|
goto str_init;
|
|
|
|
case TOK_STR:
|
|
|
|
/* string parsing */
|
2021-10-22 13:39:54 +08:00
|
|
|
t = char_type.t;
|
2009-05-06 02:18:10 +08:00
|
|
|
str_init:
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tcc_state->warn_write_strings & WARN_ON)
|
2017-07-04 22:37:49 +08:00
|
|
|
t |= VT_CONSTANT;
|
2009-05-06 02:18:10 +08:00
|
|
|
type.t = t;
|
2021-10-22 13:39:54 +08:00
|
|
|
mk_pointer(&type);
|
2009-05-06 02:18:10 +08:00
|
|
|
type.t |= VT_ARRAY;
|
|
|
|
memset(&ad, 0, sizeof(AttributeDef));
|
2021-02-01 22:10:58 +08:00
|
|
|
ad.section = rodata_section;
|
2021-10-22 13:39:54 +08:00
|
|
|
decl_initializer_alloc(&type, &ad, VT_CONST, 2, 0, 0);
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
|
|
|
case '(':
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
2009-05-06 02:18:10 +08:00
|
|
|
/* cast ? */
|
2022-07-04 21:24:46 +08:00
|
|
|
if (parse_btype(&type, &ad, 0)) {
|
2021-10-22 13:39:54 +08:00
|
|
|
type_decl(&type, &ad, &n, TYPE_ABSTRACT);
|
|
|
|
skip(')');
|
2009-05-06 02:18:10 +08:00
|
|
|
/* check ISOC99 compound literal */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok == '{') {
|
2009-05-06 02:18:10 +08:00
|
|
|
/* data is allocated locally by default */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (global_expr)
|
2009-05-06 02:18:10 +08:00
|
|
|
r = VT_CONST;
|
|
|
|
else
|
|
|
|
r = VT_LOCAL;
|
|
|
|
/* all except arrays are lvalues */
|
|
|
|
if (!(type.t & VT_ARRAY))
|
2019-12-17 01:48:31 +08:00
|
|
|
r |= VT_LVAL;
|
2009-05-06 02:18:10 +08:00
|
|
|
memset(&ad, 0, sizeof(AttributeDef));
|
2021-10-22 13:39:54 +08:00
|
|
|
decl_initializer_alloc(&type, &ad, r, 1, 0, 0);
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
2010-04-11 07:53:40 +08:00
|
|
|
if (sizeof_caller) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vpush(&type);
|
2010-04-11 07:53:40 +08:00
|
|
|
return;
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
unary();
|
|
|
|
gen_cast(&type);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
} else if (tok == '{') {
|
|
|
|
int saved_nocode_wanted = nocode_wanted;
|
|
|
|
if (const_wanted && !(nocode_wanted & unevalmask))
|
|
|
|
expect("constant");
|
|
|
|
if (0 == local_scope)
|
|
|
|
tcc_error("statement expression outside of function");
|
2009-05-06 02:18:10 +08:00
|
|
|
/* save all registers */
|
2021-10-22 13:39:54 +08:00
|
|
|
save_regs(0);
|
2009-05-06 02:18:10 +08:00
|
|
|
/* statement expression : we do not accept break/continue
|
2016-12-20 11:49:22 +08:00
|
|
|
inside as GCC does. We do retain the nocode_wanted state,
|
|
|
|
as statement expressions can't ever be entered from the
|
|
|
|
outside, so any reactivation of code emission (from labels
|
|
|
|
or loop heads) can be disabled again after the end of it. */
|
2021-10-22 13:39:54 +08:00
|
|
|
block(1);
|
2022-07-01 23:18:41 +08:00
|
|
|
/* If the statement expr can be entered, then we retain the current
|
|
|
|
nocode_wanted state (from e.g. a 'return 0;' in the stmt-expr).
|
|
|
|
If it can't be entered then the state is that from before the
|
|
|
|
statement expression. */
|
|
|
|
if (saved_nocode_wanted)
|
|
|
|
nocode_wanted = saved_nocode_wanted;
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(')');
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
gexpr();
|
|
|
|
skip(')');
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case '*':
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
|
|
|
unary();
|
|
|
|
indir();
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
|
|
|
case '&':
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
|
|
|
unary();
|
2009-05-06 02:18:10 +08:00
|
|
|
/* functions names must be treated as function pointers,
|
|
|
|
except for unary '&' and sizeof. Since we consider that
|
|
|
|
functions are not lvalues, we only have to handle it
|
|
|
|
there and in function calls. */
|
|
|
|
/* arrays can also be used although they are not lvalues */
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((vtop->type.t & VT_BTYPE) != VT_FUNC &&
|
2022-09-19 21:32:07 +08:00
|
|
|
!(vtop->type.t & (VT_ARRAY | VT_VLA)))
|
2021-10-22 13:39:54 +08:00
|
|
|
test_lvalue();
|
|
|
|
if (vtop->sym)
|
|
|
|
vtop->sym->a.addrtaken = 1;
|
|
|
|
mk_pointer(&vtop->type);
|
|
|
|
gaddrof();
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
|
|
|
case '!':
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
|
|
|
unary();
|
|
|
|
gen_test_zero(TOK_EQ);
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
|
|
|
case '~':
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
|
|
|
unary();
|
|
|
|
vpushi(-1);
|
|
|
|
gen_op('^');
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
|
|
|
case '+':
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
|
|
|
unary();
|
|
|
|
if ((vtop->type.t & VT_BTYPE) == VT_PTR)
|
|
|
|
tcc_error("pointer not accepted for unary plus");
|
2014-01-12 11:44:27 +08:00
|
|
|
/* In order to force cast, we add zero, except for floating point
|
2015-07-30 04:53:57 +08:00
|
|
|
where we really need an noop (otherwise -0.0 will be transformed
|
|
|
|
into +0.0). */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (!is_float(vtop->type.t)) {
|
|
|
|
vpushi(0);
|
|
|
|
gen_op('+');
|
2015-07-30 04:53:57 +08:00
|
|
|
}
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
|
|
|
case TOK_SIZEOF:
|
|
|
|
case TOK_ALIGNOF1:
|
|
|
|
case TOK_ALIGNOF2:
|
2018-12-13 02:53:58 +08:00
|
|
|
case TOK_ALIGNOF3:
|
2021-10-22 13:39:54 +08:00
|
|
|
t = tok;
|
|
|
|
next();
|
|
|
|
in_sizeof++;
|
|
|
|
expr_type(&type, unary); /* Perform a in_sizeof = 0; */
|
2009-05-06 02:18:10 +08:00
|
|
|
if (t == TOK_SIZEOF) {
|
2021-12-08 17:49:28 +08:00
|
|
|
vpush_type_size(&type, &align);
|
|
|
|
gen_cast_s(VT_SIZE_T);
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
2021-12-08 17:49:28 +08:00
|
|
|
type_size(&type, &align);
|
|
|
|
s = NULL;
|
|
|
|
if (vtop[1].r & VT_SYM)
|
|
|
|
s = vtop[1].sym; /* hack: accessing previous vtop */
|
|
|
|
if (s && s->a.aligned)
|
|
|
|
align = 1 << (s->a.aligned - 1);
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushs(align);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2016-04-16 17:41:53 +08:00
|
|
|
case TOK_builtin_expect:
|
2017-02-27 09:22:28 +08:00
|
|
|
/* __builtin_expect is a no-op for now */
|
2021-10-22 13:39:54 +08:00
|
|
|
parse_builtin_params(0, "ee");
|
|
|
|
vpop();
|
2016-04-16 17:41:53 +08:00
|
|
|
break;
|
2009-05-06 02:18:10 +08:00
|
|
|
case TOK_builtin_types_compatible_p:
|
2021-10-22 13:39:54 +08:00
|
|
|
parse_builtin_params(0, "tt");
|
|
|
|
vtop[-1].type.t &= ~(VT_CONSTANT | VT_VOLATILE);
|
|
|
|
vtop[0].type.t &= ~(VT_CONSTANT | VT_VOLATILE);
|
|
|
|
n = is_compatible_types(&vtop[-1].type, &vtop[0].type);
|
|
|
|
vtop -= 2;
|
|
|
|
vpushi(n);
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
2016-07-13 21:11:40 +08:00
|
|
|
case TOK_builtin_choose_expr:
|
|
|
|
{
|
2016-11-06 12:02:11 +08:00
|
|
|
int64_t c;
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
|
|
|
skip('(');
|
|
|
|
c = expr_const64();
|
|
|
|
skip(',');
|
2016-07-13 21:11:40 +08:00
|
|
|
if (!c) {
|
2021-10-22 13:39:54 +08:00
|
|
|
nocode_wanted++;
|
2016-07-13 21:11:40 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
expr_eq();
|
2016-07-13 21:11:40 +08:00
|
|
|
if (!c) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vpop();
|
|
|
|
nocode_wanted--;
|
2016-07-13 21:11:40 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(',');
|
2016-07-13 21:11:40 +08:00
|
|
|
if (c) {
|
2021-10-22 13:39:54 +08:00
|
|
|
nocode_wanted++;
|
2016-07-13 21:11:40 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
expr_eq();
|
2016-07-13 21:11:40 +08:00
|
|
|
if (c) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vpop();
|
|
|
|
nocode_wanted--;
|
2016-07-13 21:11:40 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(')');
|
2016-07-13 21:11:40 +08:00
|
|
|
}
|
|
|
|
break;
|
2009-05-06 02:18:10 +08:00
|
|
|
case TOK_builtin_constant_p:
|
2022-11-29 14:56:26 +08:00
|
|
|
constant_p = 1;
|
2021-10-22 13:39:54 +08:00
|
|
|
parse_builtin_params(1, "e");
|
2022-11-29 14:56:26 +08:00
|
|
|
n = constant_p &&
|
|
|
|
(vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
|
2021-10-22 13:39:54 +08:00
|
|
|
!((vtop->r & VT_SYM) && vtop->sym->a.addrtaken);
|
|
|
|
vtop--;
|
|
|
|
vpushi(n);
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
|
|
|
case TOK_builtin_frame_address:
|
2015-03-07 05:01:14 +08:00
|
|
|
case TOK_builtin_return_address:
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
int tok1 = tok;
|
2023-01-16 15:37:47 +08:00
|
|
|
int64_t level;
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
|
|
|
skip('(');
|
2023-01-16 15:37:47 +08:00
|
|
|
level = expr_const64();
|
|
|
|
if (level < 0) {
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("%s only takes positive integers",
|
2015-03-07 05:01:14 +08:00
|
|
|
tok1 == TOK_builtin_return_address ?
|
|
|
|
"__builtin_return_address" :
|
|
|
|
"__builtin_frame_address");
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(')');
|
2009-05-06 02:18:10 +08:00
|
|
|
type.t = VT_VOID;
|
2021-10-22 13:39:54 +08:00
|
|
|
mk_pointer(&type);
|
|
|
|
vset(&type, VT_LOCAL, 0); /* local frame */
|
Add support for __builtin_frame_address(level)
Continuing d6072d37 (Add __builtin_frame_address(0)) implement
__builtin_frame_address for levels greater than zero, in order for
tinycc to be able to compile its own lib/bcheck.c after
cffb7af9 (lib/bcheck: Prevent __bound_local_new / __bound_local_delete
from being miscompiled).
I'm new to the internals, and used the most simple way to do it.
Generated code is not very good for levels >= 2, compare
gcc tcc
level=0 mov %ebp,%eax lea 0x0(%ebp),%eax
level=1 mov 0x0(%ebp),%eax mov 0x0(%ebp),%eax
level=2 mov 0x0(%ebp),%eax mov 0x0(%ebp),%eax
mov (%eax),%eax mov %eax,-0x10(%ebp)
mov -0x10(%ebp),%eax
mov (%eax),%eax
level=3 mov 0x0(%ebp),%eax mov 0x0(%ebp),%eax
mov (%eax),%eax mov (%eax),%ecx
mov (%eax),%eax mov (%ecx),%eax
But this is still an improvement and for bcheck we need level=1 for
which the code is good.
For the tests I had to force gcc use -O0 to not inline the functions.
And -fno-omit-frame-pointer just in case.
If someone knows how to improve the generated code - help is
appreciated.
Thanks,
Kirill
Cc: Michael Matz <matz@suse.de>
Cc: Shinichiro Hamaji <shinichiro.hamaji@gmail.com>
2012-11-15 07:31:49 +08:00
|
|
|
while (level--) {
|
2020-06-16 13:39:48 +08:00
|
|
|
#ifdef TCC_TARGET_RISCV64
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushi(2*PTR_SIZE);
|
|
|
|
gen_op('-');
|
2020-06-16 13:39:48 +08:00
|
|
|
#endif
|
2021-10-22 13:39:54 +08:00
|
|
|
mk_pointer(&vtop->type);
|
|
|
|
indir(); /* -> parent frame */
|
Add support for __builtin_frame_address(level)
Continuing d6072d37 (Add __builtin_frame_address(0)) implement
__builtin_frame_address for levels greater than zero, in order for
tinycc to be able to compile its own lib/bcheck.c after
cffb7af9 (lib/bcheck: Prevent __bound_local_new / __bound_local_delete
from being miscompiled).
I'm new to the internals, and used the most simple way to do it.
Generated code is not very good for levels >= 2, compare
gcc tcc
level=0 mov %ebp,%eax lea 0x0(%ebp),%eax
level=1 mov 0x0(%ebp),%eax mov 0x0(%ebp),%eax
level=2 mov 0x0(%ebp),%eax mov 0x0(%ebp),%eax
mov (%eax),%eax mov %eax,-0x10(%ebp)
mov -0x10(%ebp),%eax
mov (%eax),%eax
level=3 mov 0x0(%ebp),%eax mov 0x0(%ebp),%eax
mov (%eax),%eax mov (%eax),%ecx
mov (%eax),%eax mov (%ecx),%eax
But this is still an improvement and for bcheck we need level=1 for
which the code is good.
For the tests I had to force gcc use -O0 to not inline the functions.
And -fno-omit-frame-pointer just in case.
If someone knows how to improve the generated code - help is
appreciated.
Thanks,
Kirill
Cc: Michael Matz <matz@suse.de>
Cc: Shinichiro Hamaji <shinichiro.hamaji@gmail.com>
2012-11-15 07:31:49 +08:00
|
|
|
}
|
2015-03-07 05:01:14 +08:00
|
|
|
if (tok1 == TOK_builtin_return_address) {
|
|
|
|
// assume return address is just above frame pointer on stack
|
2020-06-16 13:39:48 +08:00
|
|
|
#ifdef TCC_TARGET_ARM
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushi(2*PTR_SIZE);
|
|
|
|
gen_op('+');
|
2020-06-16 13:39:48 +08:00
|
|
|
#elif defined TCC_TARGET_RISCV64
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushi(PTR_SIZE);
|
|
|
|
gen_op('-');
|
2020-06-16 13:39:48 +08:00
|
|
|
#else
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushi(PTR_SIZE);
|
|
|
|
gen_op('+');
|
2020-06-16 13:39:48 +08:00
|
|
|
#endif
|
2021-10-22 13:39:54 +08:00
|
|
|
mk_pointer(&vtop->type);
|
|
|
|
indir();
|
2015-03-07 05:01:14 +08:00
|
|
|
}
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
break;
|
2019-07-21 09:25:12 +08:00
|
|
|
#ifdef TCC_TARGET_RISCV64
|
|
|
|
case TOK_builtin_va_start:
|
2021-10-22 13:39:54 +08:00
|
|
|
parse_builtin_params(0, "ee");
|
|
|
|
r = vtop->r & VT_VALMASK;
|
2019-07-21 09:25:12 +08:00
|
|
|
if (r == VT_LLOCAL)
|
|
|
|
r = VT_LOCAL;
|
|
|
|
if (r != VT_LOCAL)
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("__builtin_va_start expects a local variable");
|
|
|
|
gen_va_start();
|
|
|
|
vstore();
|
2019-07-21 09:25:12 +08:00
|
|
|
break;
|
|
|
|
#endif
|
2013-04-24 09:19:15 +08:00
|
|
|
#ifdef TCC_TARGET_X86_64
|
|
|
|
#ifdef TCC_TARGET_PE
|
|
|
|
case TOK_builtin_va_start:
|
2021-10-22 13:39:54 +08:00
|
|
|
parse_builtin_params(0, "ee");
|
|
|
|
r = vtop->r & VT_VALMASK;
|
2017-05-07 18:41:29 +08:00
|
|
|
if (r == VT_LLOCAL)
|
|
|
|
r = VT_LOCAL;
|
|
|
|
if (r != VT_LOCAL)
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("__builtin_va_start expects a local variable");
|
|
|
|
vtop->r = r;
|
|
|
|
vtop->type = char_pointer_type;
|
|
|
|
vtop->c.i += 8;
|
|
|
|
vstore();
|
2013-04-24 09:19:15 +08:00
|
|
|
break;
|
|
|
|
#else
|
2010-12-28 18:32:40 +08:00
|
|
|
case TOK_builtin_va_arg_types:
|
2021-10-22 13:39:54 +08:00
|
|
|
parse_builtin_params(0, "t");
|
|
|
|
vpushi(classify_x86_64_va_arg(&vtop->type));
|
|
|
|
vswap();
|
|
|
|
vpop();
|
2017-12-26 04:32:27 +08:00
|
|
|
break;
|
2013-04-24 09:19:15 +08:00
|
|
|
#endif
|
2009-05-06 02:18:10 +08:00
|
|
|
#endif
|
2015-02-14 02:58:31 +08:00
|
|
|
|
|
|
|
#ifdef TCC_TARGET_ARM64
|
2017-12-26 04:32:27 +08:00
|
|
|
case TOK_builtin_va_start: {
|
2021-10-22 13:39:54 +08:00
|
|
|
parse_builtin_params(0, "ee");
|
2015-02-14 02:58:31 +08:00
|
|
|
//xx check types
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_va_start();
|
|
|
|
vpushi(0);
|
|
|
|
vtop->type.t = VT_VOID;
|
2015-02-14 02:58:31 +08:00
|
|
|
break;
|
|
|
|
}
|
2017-12-26 04:32:27 +08:00
|
|
|
case TOK_builtin_va_arg: {
|
2021-10-22 13:39:54 +08:00
|
|
|
parse_builtin_params(0, "et");
|
|
|
|
type = vtop->type;
|
|
|
|
vpop();
|
2015-02-14 02:58:31 +08:00
|
|
|
//xx check types
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_va_arg(&type);
|
|
|
|
vtop->type = type;
|
2015-02-14 02:58:31 +08:00
|
|
|
break;
|
|
|
|
}
|
2015-03-08 08:10:44 +08:00
|
|
|
case TOK___arm64_clear_cache: {
|
2021-10-22 13:39:54 +08:00
|
|
|
parse_builtin_params(0, "ee");
|
|
|
|
gen_clear_cache();
|
|
|
|
vpushi(0);
|
|
|
|
vtop->type.t = VT_VOID;
|
2015-03-08 08:10:44 +08:00
|
|
|
break;
|
|
|
|
}
|
2015-02-14 02:58:31 +08:00
|
|
|
#endif
|
2020-07-05 20:01:50 +08:00
|
|
|
|
2021-01-26 21:29:07 +08:00
|
|
|
/* atomic operations */
|
2021-02-15 02:41:59 +08:00
|
|
|
case TOK___atomic_store:
|
|
|
|
case TOK___atomic_load:
|
|
|
|
case TOK___atomic_exchange:
|
|
|
|
case TOK___atomic_compare_exchange:
|
|
|
|
case TOK___atomic_fetch_add:
|
|
|
|
case TOK___atomic_fetch_sub:
|
|
|
|
case TOK___atomic_fetch_or:
|
|
|
|
case TOK___atomic_fetch_xor:
|
|
|
|
case TOK___atomic_fetch_and:
|
2022-10-17 00:51:56 +08:00
|
|
|
case TOK___atomic_fetch_nand:
|
|
|
|
case TOK___atomic_add_fetch:
|
|
|
|
case TOK___atomic_sub_fetch:
|
|
|
|
case TOK___atomic_or_fetch:
|
|
|
|
case TOK___atomic_xor_fetch:
|
|
|
|
case TOK___atomic_and_fetch:
|
|
|
|
case TOK___atomic_nand_fetch:
|
2021-10-22 13:39:54 +08:00
|
|
|
parse_atomic(tok);
|
2021-01-26 21:29:07 +08:00
|
|
|
break;
|
|
|
|
|
2015-03-10 23:23:00 +08:00
|
|
|
/* pre operations */
|
2009-05-06 02:18:10 +08:00
|
|
|
case TOK_INC:
|
|
|
|
case TOK_DEC:
|
2021-10-22 13:39:54 +08:00
|
|
|
t = tok;
|
|
|
|
next();
|
|
|
|
unary();
|
|
|
|
inc(0, t);
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
|
|
|
case '-':
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
|
|
|
unary();
|
|
|
|
if (is_float(vtop->type.t)) {
|
|
|
|
gen_opif(TOK_NEG);
|
2021-01-04 10:58:22 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushi(0);
|
|
|
|
vswap();
|
|
|
|
gen_op('-');
|
2021-01-04 10:58:22 +08:00
|
|
|
}
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
|
|
|
case TOK_LAND:
|
|
|
|
if (!gnu_ext)
|
|
|
|
goto tok_identifier;
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
2009-05-06 02:18:10 +08:00
|
|
|
/* allow to take the address of a label */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok < TOK_UIDENT)
|
|
|
|
expect("label identifier");
|
|
|
|
s = label_find(tok);
|
2009-05-06 02:18:10 +08:00
|
|
|
if (!s) {
|
2021-10-22 13:39:54 +08:00
|
|
|
s = label_push(&global_label_stack, tok, LABEL_FORWARD);
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
|
|
|
if (s->r == LABEL_DECLARED)
|
|
|
|
s->r = LABEL_FORWARD;
|
|
|
|
}
|
2022-07-07 22:45:16 +08:00
|
|
|
if ((s->type.t & VT_BTYPE) != VT_PTR) {
|
2009-05-06 02:18:10 +08:00
|
|
|
s->type.t = VT_VOID;
|
2021-10-22 13:39:54 +08:00
|
|
|
mk_pointer(&s->type);
|
2009-05-06 02:18:10 +08:00
|
|
|
s->type.t |= VT_STATIC;
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushsym(&s->type, s);
|
|
|
|
next();
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
2017-02-27 09:22:28 +08:00
|
|
|
|
2016-12-09 18:42:41 +08:00
|
|
|
case TOK_GENERIC:
|
|
|
|
{
|
|
|
|
CType controlling_type;
|
|
|
|
int has_default = 0;
|
|
|
|
int has_match = 0;
|
|
|
|
int learn = 0;
|
|
|
|
TokenString *str = NULL;
|
2021-10-22 13:39:54 +08:00
|
|
|
int saved_const_wanted = const_wanted;
|
2016-12-09 18:42:41 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
|
|
|
skip('(');
|
|
|
|
const_wanted = 0;
|
|
|
|
expr_type(&controlling_type, expr_eq);
|
multiple fixes for _Generic
* check that _Generic doesn't match unsigned char * with char *
this case is useful as with -funsigned-char, 'char *' is unsigned
* change VT_LONG so it's now a qualifier
VT_LONG are never use for code generation, but only durring parsing state,
in _Generic we need to be able to make diference between
'long' and 'long long'
So VT_LONG is now use as a type qualifier, it's old behaviour is still
here, but we can keep trace of what was a long and what wasn't
* add TOK_CLONG and TOK_CULONG
tcc was directly converting value like '7171L' into TOK_CLLONG or
TOK_CINT depending of the machine architecture.
because of that, we was unable to make diference between a long and a
long long, which doesn't work with _Generic.
So now 7171L is a TOK_CLONG, and we can handle _Generic properly
* check that _Generic can make diference between long and long long
* uncomment "type match twice" as it should now pass tests on any platforms
* add inside_generic global
the point of this variable is to use VT_LONG in comparaison only
when we are evaluating a _Generic.
problem is with my lastest patchs tcc can now make the diference between
a 'long long' and a 'long', but in 64 bit stddef.h typedef uint64_t as
typedef signed long long int int64_t and stdint.h as unsigned long int, so tcc
break when stdint.h and stddef.h are include together.
Another solution woud be to modifie include/stddef.h so it define uint64_t as
unsigned long int when processor is 64 bit, but this could break some
legacy code, so for now, VT_LONG are use only inside generc.
* check that _Generic parse first argument correctly
* check that _Generic evaluate correctly exresion like "f() / 2"
2017-07-10 23:44:53 +08:00
|
|
|
controlling_type.t &= ~(VT_CONSTANT | VT_VOLATILE | VT_ARRAY);
|
2018-03-16 07:26:16 +08:00
|
|
|
if ((controlling_type.t & VT_BTYPE) == VT_FUNC)
|
2021-10-22 13:39:54 +08:00
|
|
|
mk_pointer(&controlling_type);
|
|
|
|
const_wanted = saved_const_wanted;
|
2016-12-09 18:42:41 +08:00
|
|
|
for (;;) {
|
|
|
|
learn = 0;
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(',');
|
|
|
|
if (tok == TOK_DEFAULT) {
|
2016-12-09 18:42:41 +08:00
|
|
|
if (has_default)
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("too many 'default'");
|
2017-07-21 04:21:27 +08:00
|
|
|
has_default = 1;
|
|
|
|
if (!has_match)
|
2016-12-09 18:42:41 +08:00
|
|
|
learn = 1;
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
2016-12-09 18:42:41 +08:00
|
|
|
} else {
|
2017-07-21 04:21:27 +08:00
|
|
|
AttributeDef ad_tmp;
|
2016-12-09 18:42:41 +08:00
|
|
|
int itmp;
|
2017-07-21 04:21:27 +08:00
|
|
|
CType cur_type;
|
2019-04-29 19:53:07 +08:00
|
|
|
|
2022-07-04 21:24:46 +08:00
|
|
|
parse_btype(&cur_type, &ad_tmp, 0);
|
2021-10-22 13:39:54 +08:00
|
|
|
type_decl(&cur_type, &ad_tmp, &itmp, TYPE_ABSTRACT);
|
2016-12-09 18:42:41 +08:00
|
|
|
if (compare_types(&controlling_type, &cur_type, 0)) {
|
|
|
|
if (has_match) {
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("type match twice");
|
2016-12-09 18:42:41 +08:00
|
|
|
}
|
|
|
|
has_match = 1;
|
|
|
|
learn = 1;
|
|
|
|
}
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(':');
|
2016-12-09 18:42:41 +08:00
|
|
|
if (learn) {
|
2017-07-21 04:21:27 +08:00
|
|
|
if (str)
|
2021-10-22 13:39:54 +08:00
|
|
|
tok_str_free(str);
|
|
|
|
skip_or_save_block(&str);
|
2016-12-09 18:42:41 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
skip_or_save_block(NULL);
|
2016-12-09 18:42:41 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok == ')')
|
2016-12-09 18:42:41 +08:00
|
|
|
break;
|
|
|
|
}
|
2017-07-21 04:21:27 +08:00
|
|
|
if (!str) {
|
|
|
|
char buf[60];
|
2021-10-22 13:39:54 +08:00
|
|
|
type_to_str(buf, sizeof buf, &controlling_type, NULL);
|
|
|
|
tcc_error("type '%s' does not match any association", buf);
|
2016-12-09 18:42:41 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
begin_macro(str, 1);
|
|
|
|
next();
|
|
|
|
expr_eq();
|
|
|
|
if (tok != TOK_EOF)
|
|
|
|
expect(",");
|
|
|
|
end_macro();
|
|
|
|
next();
|
2016-12-09 18:42:41 +08:00
|
|
|
break;
|
|
|
|
}
|
2010-05-06 08:19:00 +08:00
|
|
|
// special qnan , snan and infinity values
|
|
|
|
case TOK___NAN__:
|
2017-12-24 20:16:09 +08:00
|
|
|
n = 0x7fc00000;
|
|
|
|
special_math_val:
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushi(n);
|
|
|
|
vtop->type.t = VT_FLOAT;
|
|
|
|
next();
|
2010-05-06 08:19:00 +08:00
|
|
|
break;
|
|
|
|
case TOK___SNAN__:
|
2017-12-24 20:16:09 +08:00
|
|
|
n = 0x7f800001;
|
|
|
|
goto special_math_val;
|
2010-05-06 08:19:00 +08:00
|
|
|
case TOK___INF__:
|
2017-12-24 20:16:09 +08:00
|
|
|
n = 0x7f800000;
|
|
|
|
goto special_math_val;
|
2010-05-06 08:19:00 +08:00
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
default:
|
|
|
|
tok_identifier:
|
2021-10-22 13:39:54 +08:00
|
|
|
t = tok;
|
|
|
|
next();
|
2009-05-06 02:18:10 +08:00
|
|
|
if (t < TOK_UIDENT)
|
2021-10-22 13:39:54 +08:00
|
|
|
expect("identifier");
|
|
|
|
s = sym_find(t);
|
2017-11-30 22:15:22 +08:00
|
|
|
if (!s || IS_ASM_SYM(s)) {
|
2021-10-22 13:39:54 +08:00
|
|
|
const char *name = get_tok_str(t, NULL);
|
|
|
|
if (tok != '(')
|
|
|
|
tcc_error("'%s' undeclared", name);
|
2009-05-06 02:18:10 +08:00
|
|
|
/* for simple function calls, we tolerate undeclared
|
|
|
|
external reference to int() function */
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_warning_c(warn_implicit_function_declaration)(
|
2021-08-01 02:44:51 +08:00
|
|
|
"implicit declaration of function '%s'", name);
|
2021-10-22 13:39:54 +08:00
|
|
|
s = external_global_sym(t, &func_old_type);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2017-02-14 01:23:43 +08:00
|
|
|
|
|
|
|
r = s->r;
|
|
|
|
/* A symbol that has a register is a local register variable,
|
|
|
|
which starts out as VT_LOCAL value. */
|
|
|
|
if ((r & VT_VALMASK) < VT_CONST)
|
|
|
|
r = (r & ~VT_VALMASK) | VT_LOCAL;
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
vset(&s->type, r, s->c);
|
2016-10-06 10:05:30 +08:00
|
|
|
/* Point to s as backpointer (even without r&VT_SYM).
|
|
|
|
Will be used by at least the x86 inline asm parser for
|
|
|
|
regvars. */
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->sym = s;
|
2017-07-09 18:38:25 +08:00
|
|
|
|
|
|
|
if (r & VT_SYM) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.i = 0;
|
2017-07-09 18:38:25 +08:00
|
|
|
} else if (r == VT_CONST && IS_ENUM_VAL(s->type.t)) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->c.i = s->enum_val;
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2015-07-30 04:53:57 +08:00
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* post operations */
|
|
|
|
while (1) {
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok == TOK_INC || tok == TOK_DEC) {
|
|
|
|
inc(1, tok);
|
|
|
|
next();
|
|
|
|
} else if (tok == '.' || tok == TOK_ARROW || tok == TOK_CDOUBLE) {
|
2019-04-11 06:30:41 +08:00
|
|
|
int qualifiers, cumofs = 0;
|
2015-07-30 04:53:57 +08:00
|
|
|
/* field */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok == TOK_ARROW)
|
|
|
|
indir();
|
|
|
|
qualifiers = vtop->type.t & (VT_CONSTANT | VT_VOLATILE);
|
|
|
|
test_lvalue();
|
|
|
|
gaddrof();
|
2009-05-06 02:18:10 +08:00
|
|
|
/* expect pointer on structure */
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((vtop->type.t & VT_BTYPE) != VT_STRUCT)
|
|
|
|
expect("struct or union");
|
|
|
|
if (tok == TOK_CDOUBLE)
|
|
|
|
expect("field name");
|
|
|
|
next();
|
|
|
|
if (tok == TOK_CINT || tok == TOK_CUINT)
|
|
|
|
expect("field name");
|
|
|
|
s = find_field(&vtop->type, tok, &cumofs);
|
2009-05-06 02:18:10 +08:00
|
|
|
/* add field offset to pointer */
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->type = char_pointer_type; /* change type to 'char *' */
|
2022-12-22 20:08:36 +08:00
|
|
|
vpushi(cumofs);
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_op('+');
|
2009-05-06 02:18:10 +08:00
|
|
|
/* change type to field type, and set to lvalue */
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->type = s->type;
|
|
|
|
vtop->type.t |= qualifiers;
|
2009-05-06 02:18:10 +08:00
|
|
|
/* an array is never an lvalue */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (!(vtop->type.t & VT_ARRAY)) {
|
|
|
|
vtop->r |= VT_LVAL;
|
2009-12-20 05:22:43 +08:00
|
|
|
#ifdef CONFIG_TCC_BCHECK
|
2009-05-06 02:18:10 +08:00
|
|
|
/* if bound checking, the referenced pointer must be checked */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tcc_state->do_bounds_check)
|
|
|
|
vtop->r |= VT_MUSTBOUND;
|
2009-12-20 05:22:43 +08:00
|
|
|
#endif
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
|
|
|
} else if (tok == '[') {
|
|
|
|
next();
|
|
|
|
gexpr();
|
|
|
|
gen_op('+');
|
|
|
|
indir();
|
|
|
|
skip(']');
|
|
|
|
} else if (tok == '(') {
|
2009-05-06 02:18:10 +08:00
|
|
|
SValue ret;
|
|
|
|
Sym *sa;
|
2015-03-09 07:19:59 +08:00
|
|
|
int nb_args, ret_nregs, ret_align, regsize, variadic;
|
2009-05-06 02:18:10 +08:00
|
|
|
|
|
|
|
/* function call */
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((vtop->type.t & VT_BTYPE) != VT_FUNC) {
|
2009-05-06 02:18:10 +08:00
|
|
|
/* pointer test (no array accepted) */
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((vtop->type.t & (VT_BTYPE | VT_ARRAY)) == VT_PTR) {
|
|
|
|
vtop->type = *pointed_type(&vtop->type);
|
|
|
|
if ((vtop->type.t & VT_BTYPE) != VT_FUNC)
|
2009-05-06 02:18:10 +08:00
|
|
|
goto error_func;
|
|
|
|
} else {
|
|
|
|
error_func:
|
2021-10-22 13:39:54 +08:00
|
|
|
expect("function pointer");
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->r &= ~VT_LVAL; /* no lvalue */
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
/* get return type */
|
2021-10-22 13:39:54 +08:00
|
|
|
s = vtop->type.ref;
|
|
|
|
next();
|
2009-05-06 02:18:10 +08:00
|
|
|
sa = s->next; /* first parameter */
|
2017-02-14 01:23:43 +08:00
|
|
|
nb_args = regsize = 0;
|
2009-05-06 02:18:10 +08:00
|
|
|
ret.r2 = VT_CONST;
|
|
|
|
/* compute first implicit argument if a structure is returned */
|
|
|
|
if ((s->type.t & VT_BTYPE) == VT_STRUCT) {
|
2017-07-09 18:34:11 +08:00
|
|
|
variadic = (s->f.func_type == FUNC_ELLIPSIS);
|
2015-07-30 04:57:41 +08:00
|
|
|
ret_nregs = gfunc_sret(&s->type, variadic, &ret.type,
|
|
|
|
&ret_align, ®size);
|
2019-08-11 03:21:43 +08:00
|
|
|
if (ret_nregs <= 0) {
|
2013-04-19 00:27:34 +08:00
|
|
|
/* get some space for the returned structure */
|
|
|
|
size = type_size(&s->type, &align);
|
2015-02-14 02:58:31 +08:00
|
|
|
#ifdef TCC_TARGET_ARM64
|
|
|
|
/* On arm64, a small struct is return in registers.
|
|
|
|
It is much easier to write it to memory if we know
|
|
|
|
that we are allowed to write some extra bytes, so
|
|
|
|
round the allocated space up to a power of 2: */
|
|
|
|
if (size < 16)
|
|
|
|
while (size & (size - 1))
|
|
|
|
size = (size | (size - 1)) + 1;
|
|
|
|
#endif
|
2021-10-22 13:39:54 +08:00
|
|
|
loc = (loc - size) & -align;
|
2013-04-19 00:27:34 +08:00
|
|
|
ret.type = s->type;
|
|
|
|
ret.r = VT_LOCAL | VT_LVAL;
|
|
|
|
/* pass it as 'int' to avoid structure arg passing
|
|
|
|
problems */
|
2021-10-22 13:39:54 +08:00
|
|
|
vseti(VT_LOCAL, loc);
|
2020-10-01 23:09:09 +08:00
|
|
|
#ifdef CONFIG_TCC_BCHECK
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tcc_state->do_bounds_check)
|
|
|
|
--loc;
|
2020-10-01 23:09:09 +08:00
|
|
|
#endif
|
2021-10-22 13:39:54 +08:00
|
|
|
ret.c = vtop->c;
|
2019-08-11 03:21:43 +08:00
|
|
|
if (ret_nregs < 0)
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop--;
|
2019-08-11 03:21:43 +08:00
|
|
|
else
|
|
|
|
nb_args++;
|
2013-04-19 00:27:34 +08:00
|
|
|
}
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
2013-12-16 22:38:10 +08:00
|
|
|
ret_nregs = 1;
|
2013-04-19 00:27:34 +08:00
|
|
|
ret.type = s->type;
|
|
|
|
}
|
|
|
|
|
2019-08-11 03:21:43 +08:00
|
|
|
if (ret_nregs > 0) {
|
2009-05-06 02:18:10 +08:00
|
|
|
/* return in register */
|
|
|
|
ret.c.i = 0;
|
2019-12-17 01:44:35 +08:00
|
|
|
PUT_R_RET(&ret, ret.type.t);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok != ')') {
|
2009-05-06 02:18:10 +08:00
|
|
|
for(;;) {
|
2021-10-22 13:39:54 +08:00
|
|
|
expr_eq();
|
|
|
|
gfunc_param_typed(s, sa);
|
2009-05-06 02:18:10 +08:00
|
|
|
nb_args++;
|
|
|
|
if (sa)
|
|
|
|
sa = sa->next;
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok == ')')
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(',');
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (sa)
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("too few arguments to function");
|
|
|
|
skip(')');
|
|
|
|
gfunc_call(nb_args);
|
2013-12-16 22:38:10 +08:00
|
|
|
|
2019-08-11 03:21:43 +08:00
|
|
|
if (ret_nregs < 0) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vsetc(&ret.type, ret.r, &ret.c);
|
2019-08-11 03:21:43 +08:00
|
|
|
#ifdef TCC_TARGET_RISCV64
|
2021-10-22 13:39:54 +08:00
|
|
|
arch_transfer_ret_regs(1);
|
2019-08-11 03:21:43 +08:00
|
|
|
#endif
|
|
|
|
} else {
|
|
|
|
/* return value */
|
|
|
|
for (r = ret.r + ret_nregs + !ret_nregs; r-- > ret.r;) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vsetc(&ret.type, r, &ret.c);
|
|
|
|
vtop->r2 = ret.r2; /* Loop only happens when r2 is VT_CONST */
|
2019-08-11 03:21:43 +08:00
|
|
|
}
|
2013-12-16 22:38:10 +08:00
|
|
|
|
2019-08-11 03:21:43 +08:00
|
|
|
/* handle packed struct return */
|
|
|
|
if (((s->type.t & VT_BTYPE) == VT_STRUCT) && ret_nregs) {
|
|
|
|
int addr, offset;
|
|
|
|
|
|
|
|
size = type_size(&s->type, &align);
|
|
|
|
/* We're writing whole regs often, make sure there's enough
|
|
|
|
space. Assume register size is power of 2. */
|
|
|
|
if (regsize > align)
|
|
|
|
align = regsize;
|
2021-10-22 13:39:54 +08:00
|
|
|
loc = (loc - size) & -align;
|
|
|
|
addr = loc;
|
2019-08-11 03:21:43 +08:00
|
|
|
offset = 0;
|
|
|
|
for (;;) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vset(&ret.type, VT_LOCAL | VT_LVAL, addr + offset);
|
|
|
|
vswap();
|
|
|
|
vstore();
|
|
|
|
vtop--;
|
2019-08-11 03:21:43 +08:00
|
|
|
if (--ret_nregs == 0)
|
|
|
|
break;
|
|
|
|
offset += regsize;
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
vset(&s->type, VT_LOCAL | VT_LVAL, addr);
|
2013-11-22 09:27:15 +08:00
|
|
|
}
|
2019-09-23 23:45:39 +08:00
|
|
|
|
|
|
|
/* Promote char/short return values. This is matters only
|
Adjust return value promotion for some archs
this is a bit complicated: for i386 and x86-64 we really need to
extend return values ourself, as the common code now does. For arm64
this at least preserves old behaviour. For riscv64 we don't have to
extend ourself but can expect things to be extended up to int (this
matters for var-args tests, when the sign-extension to int64 needs to
happen explicitely). As the extensions are useless, don't do them.
And for arm32 we actually can't express GCC behaviour: the callee side
expects the return value to be correctly extended to int32, but
remembers the original type. In case the ultimate target type for the
call result is only int, no further extension is done. But in case
the target type is e.g. int64 an extension happens, but not from int32
but from the original type. We don't know the ultimate target type,
so we have to choose a type to put into vtop:
* original type (plus VT_MUSTCAST) - this looses when the ultimate
target is int (GCC: no cast, TCC: a cast)
* int (without MUSTCAST) - this looses when the ultimate target is
int64 (GCC: cast from original type, TCC: cast from int)
This difference can only be seen with undefined sources, like the
testcases, so it doesn't seem worthwhile to try an make it work, just
disable the test on arm and choose the second variant as that generates
less code.
2019-12-17 08:46:06 +08:00
|
|
|
for calling function that were not compiled by TCC and
|
|
|
|
only on some architectures. For those where it doesn't
|
|
|
|
matter we expect things to be already promoted to int,
|
|
|
|
but not larger. */
|
2019-09-23 23:45:39 +08:00
|
|
|
t = s->type.t & VT_BTYPE;
|
Adjust return value promotion for some archs
this is a bit complicated: for i386 and x86-64 we really need to
extend return values ourself, as the common code now does. For arm64
this at least preserves old behaviour. For riscv64 we don't have to
extend ourself but can expect things to be extended up to int (this
matters for var-args tests, when the sign-extension to int64 needs to
happen explicitely). As the extensions are useless, don't do them.
And for arm32 we actually can't express GCC behaviour: the callee side
expects the return value to be correctly extended to int32, but
remembers the original type. In case the ultimate target type for the
call result is only int, no further extension is done. But in case
the target type is e.g. int64 an extension happens, but not from int32
but from the original type. We don't know the ultimate target type,
so we have to choose a type to put into vtop:
* original type (plus VT_MUSTCAST) - this looses when the ultimate
target is int (GCC: no cast, TCC: a cast)
* int (without MUSTCAST) - this looses when the ultimate target is
int64 (GCC: cast from original type, TCC: cast from int)
This difference can only be seen with undefined sources, like the
testcases, so it doesn't seem worthwhile to try an make it work, just
disable the test on arm and choose the second variant as that generates
less code.
2019-12-17 08:46:06 +08:00
|
|
|
if (t == VT_BYTE || t == VT_SHORT || t == VT_BOOL) {
|
|
|
|
#ifdef PROMOTE_RET
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->r |= BFVAL(VT_MUSTCAST, 1);
|
Adjust return value promotion for some archs
this is a bit complicated: for i386 and x86-64 we really need to
extend return values ourself, as the common code now does. For arm64
this at least preserves old behaviour. For riscv64 we don't have to
extend ourself but can expect things to be extended up to int (this
matters for var-args tests, when the sign-extension to int64 needs to
happen explicitely). As the extensions are useless, don't do them.
And for arm32 we actually can't express GCC behaviour: the callee side
expects the return value to be correctly extended to int32, but
remembers the original type. In case the ultimate target type for the
call result is only int, no further extension is done. But in case
the target type is e.g. int64 an extension happens, but not from int32
but from the original type. We don't know the ultimate target type,
so we have to choose a type to put into vtop:
* original type (plus VT_MUSTCAST) - this looses when the ultimate
target is int (GCC: no cast, TCC: a cast)
* int (without MUSTCAST) - this looses when the ultimate target is
int64 (GCC: cast from original type, TCC: cast from int)
This difference can only be seen with undefined sources, like the
testcases, so it doesn't seem worthwhile to try an make it work, just
disable the test on arm and choose the second variant as that generates
less code.
2019-12-17 08:46:06 +08:00
|
|
|
#else
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop->type.t = VT_INT;
|
Adjust return value promotion for some archs
this is a bit complicated: for i386 and x86-64 we really need to
extend return values ourself, as the common code now does. For arm64
this at least preserves old behaviour. For riscv64 we don't have to
extend ourself but can expect things to be extended up to int (this
matters for var-args tests, when the sign-extension to int64 needs to
happen explicitely). As the extensions are useless, don't do them.
And for arm32 we actually can't express GCC behaviour: the callee side
expects the return value to be correctly extended to int32, but
remembers the original type. In case the ultimate target type for the
call result is only int, no further extension is done. But in case
the target type is e.g. int64 an extension happens, but not from int32
but from the original type. We don't know the ultimate target type,
so we have to choose a type to put into vtop:
* original type (plus VT_MUSTCAST) - this looses when the ultimate
target is int (GCC: no cast, TCC: a cast)
* int (without MUSTCAST) - this looses when the ultimate target is
int64 (GCC: cast from original type, TCC: cast from int)
This difference can only be seen with undefined sources, like the
testcases, so it doesn't seem worthwhile to try an make it work, just
disable the test on arm and choose the second variant as that generates
less code.
2019-12-17 08:46:06 +08:00
|
|
|
#endif
|
|
|
|
}
|
2013-04-19 00:27:34 +08:00
|
|
|
}
|
2021-01-24 01:17:38 +08:00
|
|
|
if (s->f.func_noreturn) {
|
2021-10-22 13:39:54 +08:00
|
|
|
if (debug_modes)
|
2022-05-09 23:02:09 +08:00
|
|
|
tcc_tcov_block_end(tcc_state, -1);
|
2019-04-29 19:53:07 +08:00
|
|
|
CODE_OFF();
|
2021-01-24 01:17:38 +08:00
|
|
|
}
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-21 21:23:57 +08:00
|
|
|
#ifndef precedence_parser /* original top-down parser */
|
|
|
|
|
|
|
|
static void expr_prod(void)
|
2020-01-13 08:06:25 +08:00
|
|
|
{
|
2020-01-21 21:23:57 +08:00
|
|
|
int t;
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
unary();
|
|
|
|
while ((t = tok) == '*' || t == '/' || t == '%') {
|
|
|
|
next();
|
|
|
|
unary();
|
|
|
|
gen_op(t);
|
2020-01-21 21:23:57 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void expr_sum(void)
|
|
|
|
{
|
|
|
|
int t;
|
|
|
|
|
|
|
|
expr_prod();
|
2021-10-22 13:39:54 +08:00
|
|
|
while ((t = tok) == '+' || t == '-') {
|
|
|
|
next();
|
2020-01-21 21:23:57 +08:00
|
|
|
expr_prod();
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_op(t);
|
2020-01-21 21:23:57 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void expr_shift(void)
|
|
|
|
{
|
|
|
|
int t;
|
|
|
|
|
|
|
|
expr_sum();
|
2021-10-22 13:39:54 +08:00
|
|
|
while ((t = tok) == TOK_SHL || t == TOK_SAR) {
|
|
|
|
next();
|
2020-01-21 21:23:57 +08:00
|
|
|
expr_sum();
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_op(t);
|
2020-01-21 21:23:57 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void expr_cmp(void)
|
|
|
|
{
|
|
|
|
int t;
|
|
|
|
|
|
|
|
expr_shift();
|
2021-10-22 13:39:54 +08:00
|
|
|
while (((t = tok) >= TOK_ULE && t <= TOK_GT) ||
|
2020-01-21 21:23:57 +08:00
|
|
|
t == TOK_ULT || t == TOK_UGE) {
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
2020-01-21 21:23:57 +08:00
|
|
|
expr_shift();
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_op(t);
|
2020-01-21 21:23:57 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void expr_cmpeq(void)
|
|
|
|
{
|
|
|
|
int t;
|
|
|
|
|
|
|
|
expr_cmp();
|
2021-10-22 13:39:54 +08:00
|
|
|
while ((t = tok) == TOK_EQ || t == TOK_NE) {
|
|
|
|
next();
|
2020-01-21 21:23:57 +08:00
|
|
|
expr_cmp();
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_op(t);
|
2020-01-21 21:23:57 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void expr_and(void)
|
|
|
|
{
|
|
|
|
expr_cmpeq();
|
2021-10-22 13:39:54 +08:00
|
|
|
while (tok == '&') {
|
|
|
|
next();
|
2020-01-21 21:23:57 +08:00
|
|
|
expr_cmpeq();
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_op('&');
|
2020-01-13 08:06:25 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-21 21:23:57 +08:00
|
|
|
static void expr_xor(void)
|
|
|
|
{
|
|
|
|
expr_and();
|
2021-10-22 13:39:54 +08:00
|
|
|
while (tok == '^') {
|
|
|
|
next();
|
2020-01-21 21:23:57 +08:00
|
|
|
expr_and();
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_op('^');
|
2020-01-21 21:23:57 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void expr_or(void)
|
|
|
|
{
|
|
|
|
expr_xor();
|
2021-10-22 13:39:54 +08:00
|
|
|
while (tok == '|') {
|
|
|
|
next();
|
2020-01-21 21:23:57 +08:00
|
|
|
expr_xor();
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_op('|');
|
2020-01-21 21:23:57 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void expr_landor(int op);
|
2020-01-21 21:23:57 +08:00
|
|
|
|
|
|
|
static void expr_land(void)
|
|
|
|
{
|
|
|
|
expr_or();
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok == TOK_LAND)
|
|
|
|
expr_landor(tok);
|
2020-01-21 21:23:57 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Parse logical OR ('||'): parse the first operand, then delegate the
   whole short-circuit chain to expr_landor() (shared with '&&'). */
static void expr_lor(void)
{
    expr_land();
    if (tok == TOK_LOR)
        expr_landor(tok);
}
|
|
|
|
|
|
|
|
# define expr_landor_next(op) op == TOK_LAND ? expr_or() : expr_land()
|
|
|
|
#else /* defined precedence_parser */
|
2021-10-22 13:39:54 +08:00
|
|
|
# define expr_landor_next(op) unary(), expr_infix(precedence(op) + 1)
|
|
|
|
# define expr_lor() unary(), expr_infix(1)
|
2020-01-21 21:23:57 +08:00
|
|
|
|
2017-01-17 10:56:42 +08:00
|
|
|
/* Return the binding strength of binary operator token 'tok' for the
   precedence-climbing parser (1 = weakest '||', 10 = strongest mul/div),
   or 0 if 'tok' is not a binary operator.  Note the 'relat:' label sits
   inside the switch so the multi-token relational range (TOK_ULE..TOK_GT,
   tested in the default case) can share the return of level 7. */
static int precedence(int tok)
{
    switch (tok) {
        case TOK_LOR: return 1;
        case TOK_LAND: return 2;
        case '|': return 3;
        case '^': return 4;
        case '&': return 5;
        case TOK_EQ: case TOK_NE: return 6;
     relat: case TOK_ULT: case TOK_UGE: return 7;
        case TOK_SHL: case TOK_SAR: return 8;
        case '+': case '-': return 9;
        case '*': case '/': case '%': return 10;
        default:
            if (tok >= TOK_ULE && tok <= TOK_GT)
                goto relat;
            return 0;
    }
}
|
2021-10-22 13:39:54 +08:00
|
|
|
/* cache of precedence() results for all single-byte token values,
   so the hot parser path can use a plain array lookup */
static unsigned char prec[256];
/* Fill the precedence cache; called once before parsing. */
static void init_prec(void)
{
    int i;
    for (i = 0; i < 256; i++)
        prec[i] = precedence(i);
}
|
2021-10-22 13:39:54 +08:00
|
|
|
#define precedence(i) ((unsigned)i < 256 ? prec[i] : 0)
|
2009-05-06 02:18:10 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void expr_landor(int op);
|
2019-04-29 19:53:07 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Precedence-climbing infix parser: consume binary operators whose
   precedence is at least 'p', recursing with a higher threshold for
   tighter-binding right-hand sides.  '&&'/'||' are routed through
   expr_landor() which handles their short-circuit code generation. */
static void expr_infix(int p)
{
    int t = tok, p2;
    while ((p2 = precedence(t)) >= p) {
        if (t == TOK_LOR || t == TOK_LAND) {
            expr_landor(t);
        } else {
            next();
            unary();
            /* the next operator binds tighter: parse it as part of
               our right operand before emitting our own operation */
            if (precedence(tok) > p2)
                expr_infix(p2 + 1);
            gen_op(t);
        }
        t = tok;
    }
}
|
2020-01-21 21:23:57 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Assuming vtop is a value used in a conditional context
   (i.e. compared with zero) return 0 if it's false, 1 if
   true and -1 if it can't be statically determined. */
static int condition_3way(void)
{
    int c = -1;
    /* only a plain constant (and not a weak symbol, whose address may
       be null at runtime) can be decided at compile time */
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        (!(vtop->r & VT_SYM) || !vtop->sym->a.weak)) {
        /* evaluate truth value on a copy so vtop itself is untouched */
        vdup();
        gen_cast_s(VT_BOOL);
        c = vtop->c.i;
        vpop();
    }
    return c;
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void expr_landor(int op)
|
2020-01-21 21:23:57 +08:00
|
|
|
{
|
|
|
|
int t = 0, cc = 1, f = 0, i = op == TOK_LAND, c;
|
|
|
|
for(;;) {
|
2021-10-22 13:39:54 +08:00
|
|
|
c = f ? i : condition_3way();
|
2020-01-21 21:23:57 +08:00
|
|
|
if (c < 0)
|
2021-10-22 13:39:54 +08:00
|
|
|
save_regs(1), cc = 0;
|
2020-01-21 21:23:57 +08:00
|
|
|
else if (c != i)
|
2021-10-22 13:39:54 +08:00
|
|
|
nocode_wanted++, f = 1;
|
|
|
|
if (tok != op)
|
2020-01-21 21:23:57 +08:00
|
|
|
break;
|
|
|
|
if (c < 0)
|
2021-10-22 13:39:54 +08:00
|
|
|
t = gvtst(i, t);
|
2020-01-21 21:23:57 +08:00
|
|
|
else
|
2021-10-22 13:39:54 +08:00
|
|
|
vpop();
|
|
|
|
next();
|
2020-01-21 21:23:57 +08:00
|
|
|
expr_landor_next(op);
|
|
|
|
}
|
|
|
|
if (cc || f) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vpop();
|
|
|
|
vpushi(i ^ f);
|
|
|
|
gsym(t);
|
|
|
|
nocode_wanted -= f;
|
2020-01-21 21:23:57 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
gvtst_set(i, t);
|
2020-01-21 21:23:57 +08:00
|
|
|
}
|
|
|
|
}
|
2016-09-04 11:23:57 +08:00
|
|
|
|
jump optimizations
This unifies VT_CMP with VT_JMP(i) by using mostly VT_CMP
with both a positive and a negative jump target list.
Such we can delay putting the non-inverted or inverted jump
until we can see which one is nore suitable (in most cases).
example:
if (a && b || c && d)
e = 0;
before this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 11 00 00 00 je 27 <main+0x27>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 84 05 00 00 00 je 27 <main+0x27>
22: e9 22 00 00 00 jmp 49 <main+0x49>
27: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
2a: 83 f8 00 cmp $0x0,%eax
2d: 0f 84 11 00 00 00 je 44 <main+0x44>
33: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
36: 83 f8 00 cmp $0x0,%eax
39: 0f 84 05 00 00 00 je 44 <main+0x44>
3f: e9 05 00 00 00 jmp 49 <main+0x49>
44: e9 08 00 00 00 jmp 51 <main+0x51>
49: b8 00 00 00 00 mov $0x0,%eax
4e: 89 45 ec mov %eax,0xffffffec(%ebp)
51: ...
with this patch:
a: 8b 45 fc mov 0xfffffffc(%ebp),%eax
d: 83 f8 00 cmp $0x0,%eax
10: 0f 84 0c 00 00 00 je 22 <main+0x22>
16: 8b 45 f8 mov 0xfffffff8(%ebp),%eax
19: 83 f8 00 cmp $0x0,%eax
1c: 0f 85 18 00 00 00 jne 3a <main+0x3a>
22: 8b 45 f4 mov 0xfffffff4(%ebp),%eax
25: 83 f8 00 cmp $0x0,%eax
28: 0f 84 14 00 00 00 je 42 <main+0x42>
2e: 8b 45 f0 mov 0xfffffff0(%ebp),%eax
31: 83 f8 00 cmp $0x0,%eax
34: 0f 84 08 00 00 00 je 42 <main+0x42>
3a: b8 00 00 00 00 mov $0x0,%eax
3f: 89 45 ec mov %eax,0xffffffec(%ebp)
42: ...
2019-06-22 17:45:35 +08:00
|
|
|
/* Return 1 if 'sv' can only evaluate to 0 or 1 in a conditional
   context: either a pending comparison (VT_CMP) or a plain integer
   constant whose value is already 0 or 1. */
static int is_cond_bool(SValue *sv)
{
    int r = sv->r;

    if (r == VT_CMP)
        return 1;
    if ((r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST
        && (sv->type.t & VT_BTYPE) == VT_INT)
        return (unsigned)sv->c.i < 2;
    return 0;
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Parse and generate code for a conditional expression 'a ? b : c',
   including the GNU extension 'a ?: c'.  Handles compile-time constant
   conditions (c == 0/1: dead branch parsed with nocode_wanted raised),
   the boolean-jump optimization for conditions feeding another
   conditional, ISO C type combination of the two branches, and keeping
   struct operands lvalues via the *(cond ? &a : &b) transformation. */
static void expr_cond(void)
{
    int tt, u, r1, r2, rc, t1, t2, islv, c, g;
    SValue sv;
    CType type;
    int ncw_prev;

    expr_lor();
    if (tok == '?') {
        next();
        c = condition_3way();
        g = (tok == ':' && gnu_ext);   /* 'a ?: c' form */
        tt = 0;
        if (!g) {
            if (c < 0) {
                save_regs(1);
                tt = gvtst(1, 0);      /* jump taken when condition false */
            } else {
                vpop();                /* constant condition: discard it */
            }
        } else if (c < 0) {
            /* needed to avoid having different registers saved in
               each branch */
            save_regs(1);
            gv_dup();                  /* the condition is also the result */
            tt = gvtst(0, 0);
        }

        ncw_prev = nocode_wanted;
        if (c == 0)
            nocode_wanted++;           /* 'then' branch is dead */
        if (!g)
            gexpr();

        /* functions used as values decay to pointers */
        if ((vtop->type.t & VT_BTYPE) == VT_FUNC)
            mk_pointer(&vtop->type);
        sv = *vtop; /* save value to handle it later */
        vtop--; /* no vpop so that FP stack is not flushed */

        if (g) {
            u = tt;
        } else if (c < 0) {
            u = gjmp(0);               /* skip over the 'else' branch */
            gsym(tt);
        } else
            u = 0;

        nocode_wanted = ncw_prev;
        if (c == 1)
            nocode_wanted++;           /* 'else' branch is dead */
        skip(':');
        expr_cond();

        if (c < 0 && is_cond_bool(vtop) && is_cond_bool(&sv)) {
            /* optimize "if (f ? a > b : c || d) ..." for example, where normally
               "a < b" and "c || d" would be forced to "(int)0/1" first, whereas
               this code jumps directly to the if's then/else branches. */
            t1 = gvtst(0, 0);
            t2 = gjmp(0);
            gsym(u);
            vpushv(&sv);
            /* combine jump targets of 2nd op with VT_CMP of 1st op */
            gvtst_set(0, t1);
            gvtst_set(1, t2);
            nocode_wanted = ncw_prev;
            //  tcc_warning("two conditions expr_cond");
            return;
        }

        if ((vtop->type.t & VT_BTYPE) == VT_FUNC)
            mk_pointer(&vtop->type);

        /* cast operands to correct type according to ISOC rules */
        if (!combine_types(&type, &sv, vtop, '?'))
            type_incompatibility_error(&sv.type, &vtop->type,
                "type mismatch in conditional expression (have '%s' and '%s')");
        /* keep structs lvalue by transforming `(expr ? a : b)` to `*(expr ? &a : &b)` so
           that `(expr ? a : b).mem` does not error with "lvalue expected" */
        islv = (vtop->r & VT_LVAL) && (sv.r & VT_LVAL) && VT_STRUCT == (type.t & VT_BTYPE);

        /* now we convert second operand */
        if (c != 1) {
            gen_cast(&type);
            if (islv) {
                mk_pointer(&vtop->type);
                gaddrof();
            } else if (VT_STRUCT == (vtop->type.t & VT_BTYPE))
                gaddrof();
        }

        rc = RC_TYPE(type.t);
        /* for long longs, we use fixed registers to avoid having
           to handle a complicated move */
        if (USING_TWO_WORDS(type.t))
            rc = RC_RET(type.t);

        tt = r2 = 0;
        if (c < 0) {
            r2 = gv(rc);
            tt = gjmp(0);
        }
        gsym(u);
        nocode_wanted = ncw_prev;

        /* this is horrible, but we must also convert first
           operand */
        if (c != 0) {
            *vtop = sv;
            gen_cast(&type);
            if (islv) {
                mk_pointer(&vtop->type);
                gaddrof();
            } else if (VT_STRUCT == (vtop->type.t & VT_BTYPE))
                gaddrof();
        }

        if (c < 0) {
            /* both branches must deliver the result in the same register */
            r1 = gv(rc);
            move_reg(r2, r1, islv ? VT_PTR : type.t);
            vtop->r = r2;
            gsym(tt);
        }

        if (islv)
            indir();   /* undo the &a / &b transformation */
    }
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void expr_eq(void)
|
2010-10-25 22:40:30 +08:00
|
|
|
{
|
|
|
|
int t;
|
2015-07-30 04:53:57 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
expr_cond();
|
|
|
|
if ((t = tok) == '=' || TOK_ASSIGN(t)) {
|
|
|
|
test_lvalue();
|
|
|
|
next();
|
2010-10-25 22:40:30 +08:00
|
|
|
if (t == '=') {
|
2021-10-22 13:39:54 +08:00
|
|
|
expr_eq();
|
2010-10-25 22:40:30 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
vdup();
|
|
|
|
expr_eq();
|
|
|
|
gen_op(TOK_ASSIGN_OP(t));
|
2010-10-25 22:40:30 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
vstore();
|
2010-10-25 22:40:30 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Parse a full expression (comma operator at the top level), leaving the
   last sub-expression's value in vtop.  Each discarded operand clears
   constant_p unless it is itself a constant whose address was not taken. */
ST_FUNC void gexpr(void)
{
    while (1) {
        expr_eq();
        if (tok != ',')
            break;
        constant_p &= (vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
            !((vtop->r & VT_SYM) && vtop->sym->a.addrtaken);
        vpop();
        next();
    }
}
|
|
|
|
|
|
|
|
/* parse a constant expression and return value in vtop. */
static void expr_const1(void)
{
    /* raise const_wanted and suppress code generation while parsing;
       unevalmask keeps the adjustment reversible in unevaluated contexts */
    const_wanted++;
    nocode_wanted += unevalmask + 1;
    expr_cond();
    nocode_wanted -= unevalmask + 1;
    const_wanted--;
}
|
|
|
|
|
|
|
|
/* parse an integer constant and return its value. */
static inline int64_t expr_const64(void)
{
    int64_t c;
    expr_const1();
    /* the result must be a plain constant (no lvalue, no symbol ref) */
    if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) != VT_CONST)
        expect("constant expression");
    c = vtop->c.i;
    vpop();
    return c;
}
|
|
|
|
|
2016-11-06 12:02:11 +08:00
|
|
|
/* parse an integer constant and return its value.
|
|
|
|
Complain if it doesn't fit 32bit (signed or unsigned). */
|
2021-10-22 13:39:54 +08:00
|
|
|
ST_FUNC int expr_const(void)
|
2016-11-06 12:02:11 +08:00
|
|
|
{
|
|
|
|
int c;
|
2021-10-22 13:39:54 +08:00
|
|
|
int64_t wc = expr_const64();
|
2016-11-06 12:02:11 +08:00
|
|
|
c = wc;
|
|
|
|
if (c != wc && (unsigned)c != wc)
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("constant exceeds 32 bit");
|
2016-11-06 12:02:11 +08:00
|
|
|
return c;
|
|
|
|
}
|
|
|
|
|
2019-04-29 19:53:07 +08:00
|
|
|
/* ------------------------------------------------------------------------- */
|
|
|
|
/* return from function */
|
2009-05-06 02:18:10 +08:00
|
|
|
|
2017-02-09 02:45:31 +08:00
|
|
|
#ifndef TCC_TARGET_ARM64
/* Generate code to return the value in vtop from the current function,
   following the target ABI for struct returns: via hidden pointer
   (ret_nregs == 0), via registers (ret_nregs > 0), or through an
   arch-specific transfer (ret_nregs < 0, RISC-V).  Pops the value. */
static void gfunc_return(CType *func_type)
{
    if ((func_type->t & VT_BTYPE) == VT_STRUCT) {
        CType type, ret_type;
        int ret_align, ret_nregs, regsize;
        ret_nregs = gfunc_sret(func_type, func_var, &ret_type,
                               &ret_align, &regsize);
        if (ret_nregs < 0) {
#ifdef TCC_TARGET_RISCV64
            arch_transfer_ret_regs(0);
#endif
        } else if (0 == ret_nregs) {
            /* if returning structure, must copy it to implicit
               first pointer arg location */
            type = *func_type;
            mk_pointer(&type);
            vset(&type, VT_LOCAL | VT_LVAL, func_vc);
            indir();
            vswap();
            /* copy structure value to pointer */
            vstore();
        } else {
            /* returning structure packed into registers */
            int size, addr, align, rc;
            size = type_size(func_type,&align);
            /* if the value is not suitably aligned for register loads,
               spill it to a properly aligned stack slot first */
            if ((vtop->r != (VT_LOCAL | VT_LVAL) ||
                 (vtop->c.i & (ret_align-1)))
                && (align & (ret_align-1))) {
                loc = (loc - size) & -ret_align;
                addr = loc;
                type = *func_type;
                vset(&type, VT_LOCAL | VT_LVAL, addr);
                vswap();
                vstore();
                vpop();
                vset(&ret_type, VT_LOCAL | VT_LVAL, addr);
            }
            vtop->type = ret_type;
            rc = RC_RET(ret_type.t);
            if (ret_nregs == 1)
                gv(rc);
            else {
                for (;;) {
                    vdup();
                    gv(rc);
                    vpop();
                    if (--ret_nregs == 0)
                        break;
                    /* We assume that when a structure is returned in multiple
                       registers, their classes are consecutive values of the
                       suite s(n) = 2^n */
                    rc <<= 1;
                    vtop->c.i += regsize;
                }
            }
        }
    } else {
        /* scalar return: load into the ABI return register class */
        gv(RC_RET(func_type->t));
    }
    vtop--; /* NOT vpop() because on x86 it would flush the fp stack */
}
#endif
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void check_func_return(void)
|
2019-06-22 19:18:54 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((func_vt.t & VT_BTYPE) == VT_VOID)
|
2019-06-22 19:18:54 +08:00
|
|
|
return;
|
2021-10-22 13:39:54 +08:00
|
|
|
if (!strcmp (funcname, "main")
|
|
|
|
&& (func_vt.t & VT_BTYPE) == VT_INT) {
|
2019-06-22 19:18:54 +08:00
|
|
|
/* main returns 0 by default */
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushi(0);
|
|
|
|
gen_assign_cast(&func_vt);
|
|
|
|
gfunc_return(&func_vt);
|
2019-06-22 19:18:54 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_warning("function might return no value: '%s'", funcname);
|
2019-06-22 19:18:54 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ------------------------------------------------------------------------- */
|
|
|
|
/* switch/case */
|
|
|
|
|
Fix switch/case on uint64_t
The switch/case operation was entirely performed on int64_t, resulting
in a warning and bad code to be emitted on 64 bit machines when used on
an unsigned long with a case range whose signed representation starts
positive and ends negative like in the example below:
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
int nbdg(unsigned long n)
{
switch (n) {
case 1UL ... 9UL: return 1;
case 10UL ... 99UL: return 2;
case 100UL ... 999UL: return 3;
case 1000UL ... 9999UL: return 4;
case 10000UL ... 99999UL: return 5;
case 100000UL ... 999999UL: return 6;
case 1000000UL ... 9999999UL: return 7;
case 10000000UL ... 99999999UL: return 8;
case 100000000UL ... 999999999UL: return 9;
case 1000000000UL ... 9999999999UL: return 10;
case 10000000000UL ... 99999999999UL: return 11;
case 100000000000UL ... 999999999999UL: return 12;
case 1000000000000UL ... 9999999999999UL: return 13;
case 10000000000000UL ... 99999999999999UL: return 14;
case 100000000000000UL ... 999999999999999UL: return 15;
case 1000000000000000UL ... 9999999999999999UL: return 16;
case 10000000000000000UL ... 99999999999999999UL: return 17;
case 100000000000000000UL ... 999999999999999999UL: return 18;
case 1000000000000000000UL ... 9999999999999999999UL: return 19; // this one
case 10000000000000000000UL ... ULONG_MAX: return 20;
}
return 0;
}
int main(int argc, char **argv)
{
unsigned long v = strtoul(argc > 1 ? argv[1] : "1111", NULL, 0);
printf("%lu : %d\n", v, nbdg(v));
return 0;
}
$ tcc dg.c
dg.c:26: warning: empty case range
$ x="";for i in 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0; do x=$x$i; ./a.out $x;done
1 : 1
12 : 2
123 : 3
1234 : 4
12345 : 5
123456 : 6
1234567 : 7
12345678 : 8
123456789 : 9
1234567890 : 10
12345678901 : 11
123456789012 : 12
1234567890123 : 13
12345678901234 : 14
123456789012345 : 15
1234567890123456 : 16
12345678901234567 : 17
123456789012345678 : 18
1234567890123456789 : 0
12345678901234567890 : 20
What this patch does is to use a separate set of signed and unsigned
case_cmp functions depending on whether the expression is signed or
unsigned, and also does this to decide when to emit the warning.
The bad code on output was caused by the removal of the unsigned bit
resulting from the signed sort, which causes only signed comparisons
to be emitted in the asm code. As such some sets could not match.
Note that there is no way to rely on the values only to sort properly
nor to emit the warning because we're effectively dealing with 65-bit
arithmetic here and any two values will have a different behavior
depending on the signed or unsigned expectation.
For unsigned expressions now the warning only happens when bounds are
switched, For signed expressions (e.g. if the input is signed long
above), the warning remains and the abnormal output as well. In both
cases this remains consistent with what gcc produces.
2020-08-18 17:08:44 +08:00
|
|
|
static int case_cmpi(const void *pa, const void *pb)
|
2016-10-03 15:40:37 +08:00
|
|
|
{
|
2016-11-06 12:02:11 +08:00
|
|
|
int64_t a = (*(struct case_t**) pa)->v1;
|
|
|
|
int64_t b = (*(struct case_t**) pb)->v1;
|
2016-10-03 15:40:37 +08:00
|
|
|
return a < b ? -1 : a > b;
|
|
|
|
}
|
2016-09-21 23:35:29 +08:00
|
|
|
|
Fix switch/case on uint64_t
The switch/case operation was entirely performed on int64_t, resulting
in a warning and bad code to be emitted on 64 bit machines when used on
an unsigned long with a case range whose signed representation starts
positive and ends negative like in the example below:
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
int nbdg(unsigned long n)
{
switch (n) {
case 1UL ... 9UL: return 1;
case 10UL ... 99UL: return 2;
case 100UL ... 999UL: return 3;
case 1000UL ... 9999UL: return 4;
case 10000UL ... 99999UL: return 5;
case 100000UL ... 999999UL: return 6;
case 1000000UL ... 9999999UL: return 7;
case 10000000UL ... 99999999UL: return 8;
case 100000000UL ... 999999999UL: return 9;
case 1000000000UL ... 9999999999UL: return 10;
case 10000000000UL ... 99999999999UL: return 11;
case 100000000000UL ... 999999999999UL: return 12;
case 1000000000000UL ... 9999999999999UL: return 13;
case 10000000000000UL ... 99999999999999UL: return 14;
case 100000000000000UL ... 999999999999999UL: return 15;
case 1000000000000000UL ... 9999999999999999UL: return 16;
case 10000000000000000UL ... 99999999999999999UL: return 17;
case 100000000000000000UL ... 999999999999999999UL: return 18;
case 1000000000000000000UL ... 9999999999999999999UL: return 19; // this one
case 10000000000000000000UL ... ULONG_MAX: return 20;
}
return 0;
}
int main(int argc, char **argv)
{
unsigned long v = strtoul(argc > 1 ? argv[1] : "1111", NULL, 0);
printf("%lu : %d\n", v, nbdg(v));
return 0;
}
$ tcc dg.c
dg.c:26: warning: empty case range
$ x="";for i in 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0; do x=$x$i; ./a.out $x;done
1 : 1
12 : 2
123 : 3
1234 : 4
12345 : 5
123456 : 6
1234567 : 7
12345678 : 8
123456789 : 9
1234567890 : 10
12345678901 : 11
123456789012 : 12
1234567890123 : 13
12345678901234 : 14
123456789012345 : 15
1234567890123456 : 16
12345678901234567 : 17
123456789012345678 : 18
1234567890123456789 : 0
12345678901234567890 : 20
What this patch does is to use a separate set of signed and unsigned
case_cmp functions depending on whether the expression is signed or
unsigned, and also does this to decide when to emit the warning.
The bad code on output was caused by the removal of the unsigned bit
resulting from the signed sort, which causes only signed comparisons
to be emitted in the asm code. As such some sets could not match.
Note that there is no way to rely on the values only to sort properly
nor to emit the warning because we're effectively dealing with 65-bit
arithmetic here and any two values will have a different behavior
depending on the signed or unsigned expectation.
For unsigned expressions now the warning only happens when bounds are
switched, For signed expressions (e.g. if the input is signed long
above), the warning remains and the abnormal output as well. In both
cases this remains consistent with what gcc produces.
2020-08-18 17:08:44 +08:00
|
|
|
static int case_cmpu(const void *pa, const void *pb)
|
|
|
|
{
|
|
|
|
uint64_t a = (uint64_t)(*(struct case_t**) pa)->v1;
|
|
|
|
uint64_t b = (uint64_t)(*(struct case_t**) pb)->v1;
|
|
|
|
return a < b ? -1 : a > b;
|
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Emit a truth test of vtop (merged with pending jump chain 't') and
   bind the resulting forward jump to the known code address 'a'. */
static void gtst_addr(int t, int a)
{
    int jmp = gvtst(0, t);
    gsym_addr(jmp, a);
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Generate dispatch code for a sorted array of switch case ranges.
   The switch value is in vtop (kept there via vdup before each test).
   Large sets use binary search on the range midpoints, small sets
   (<= 8) a linear scan; *bsym collects the "no case matched" jump. */
static void gcase(struct case_t **base, int len, int *bsym)
{
    struct case_t *p;
    int e;
    /* push 64-bit case constants when the switch value is long long */
    int ll = (vtop->type.t & VT_BTYPE) == VT_LLONG;
    while (len > 8) {
        /* binary search */
        p = base[len/2];
        vdup();
        if (ll)
            vpushll(p->v2);
        else
            vpushi(p->v2);
        gen_op(TOK_LE);
        e = gvtst(1, 0);   /* x > v2: handled after the recursion */
        vdup();
        if (ll)
            vpushll(p->v1);
        else
            vpushi(p->v1);
        gen_op(TOK_GE);
        gtst_addr(0, p->sym); /* v1 <= x <= v2 */
        /* x < v1 */
        gcase(base, len/2, bsym);
        /* x > v2 */
        gsym(e);
        e = len/2 + 1;
        base += e; len -= e;
    }
    /* linear scan */
    while (len--) {
        p = *base++;
        vdup();
        if (ll)
            vpushll(p->v2);
        else
            vpushi(p->v2);
        if (p->v1 == p->v2) {
            /* single-value case: one equality test */
            gen_op(TOK_EQ);
            gtst_addr(0, p->sym);
        } else {
            /* case range: test v1 <= x <= v2 */
            gen_op(TOK_LE);
            e = gvtst(1, 0);
            vdup();
            if (ll)
                vpushll(p->v1);
            else
                vpushi(p->v1);
            gen_op(TOK_GE);
            gtst_addr(0, p->sym);
            gsym(e);
        }
    }
    /* fall through to default/end of switch */
    *bsym = gjmp(*bsym);
}
|
|
|
|
|
2019-06-22 19:18:54 +08:00
|
|
|
/* ------------------------------------------------------------------------- */
|
|
|
|
/* __attribute__((cleanup(fn))) */
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Emit calls to the __attribute__((cleanup(fn))) handlers registered in
   the current scope's cleanup chain, innermost first, stopping when the
   chain reaches 'stop' (NULL means run them all). */
static void try_call_scope_cleanup(Sym *stop)
{
    Sym *cls = cur_scope->cl.s;

    for (; cls != stop; cls = cls->ncl) {
        Sym *fs = cls->next;      /* the cleanup function */
        Sym *vs = cls->prev_tok;  /* the variable it applies to */

        /* call fs(&vs) */
        vpushsym(&fs->type, fs);
        vset(&vs->type, vs->r, vs->c);
        vtop->sym = vs;
        mk_pointer(&vtop->type);
        gaddrof();
        gfunc_call(1);
    }
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Before a goto, run the cleanup handlers of every scope being left:
   walk up from the current cleanup chain and the label's recorded chain
   ('cleanupstate') to their nearest common ancestor, then emit cleanups
   for everything below it in the current chain. */
static void try_call_cleanup_goto(Sym *cleanupstate)
{
    Sym *oc, *cc;
    int ocd, ccd;

    if (!cur_scope->cl.s)
        return;

    /* search NCA of both cleanup chains given parents and initial depth */
    ocd = cleanupstate ? cleanupstate->v & ~SYM_FIELD : 0;
    for (ccd = cur_scope->cl.n, oc = cleanupstate; ocd > ccd; --ocd, oc = oc->ncl)
        ;
    for (cc = cur_scope->cl.s; ccd > ocd; --ccd, cc = cc->ncl)
        ;
    for (; cc != oc; cc = cc->ncl, oc = oc->ncl, --ccd)
        ;

    try_call_scope_cleanup(cc);
}
|
|
|
|
|
2019-04-29 19:53:07 +08:00
|
|
|
/* call 'func' for each __attribute__((cleanup(func))) */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* call 'func' for each __attribute__((cleanup(func))) */
/* Run when leaving a block that registered cleanups: patch any pending
   forward gotos that jumped out of this block so that they pass through
   this block's cleanup calls, then emit the cleanups for the normal
   fall-through path.  O is the scope being returned to. */
static void block_cleanup(struct scope *o)
{
    int jmp = 0;
    Sym *g, **pg;
    /* g->c holds the cleanup-depth recorded when the goto was parsed;
       only gotos deeper than the target scope need processing here */
    for (pg = &pending_gotos; (g = *pg) && g->c > o->cl.n;) {
        if (g->prev_tok->r & LABEL_FORWARD) {
            Sym *pcl = g->next;
            /* fall-through code must skip the goto trampoline below */
            if (!jmp)
                jmp = gjmp(0);
            /* redirect the goto here, emit this block's cleanups, and
               re-jump forward (target still unknown) */
            gsym(pcl->jnext);
            try_call_scope_cleanup(o->cl.s);
            pcl->jnext = gjmp(0);
            if (!o->cl.n)
                goto remove_pending;
            /* goto is now pending at the outer scope's depth */
            g->c = o->cl.n;
            pg = &g->prev;
        } else {
    remove_pending:
            /* label resolved (or no outer cleanups): unlink the entry */
            *pg = g->prev;
            sym_free(g);
        }
    }
    /* normal exit path: skip trampolines, then run the cleanups */
    gsym(jmp);
    try_call_scope_cleanup(o->cl.s);
}
|
|
|
|
|
2019-06-22 19:18:54 +08:00
|
|
|
/* ------------------------------------------------------------------------- */
|
|
|
|
/* VLA */
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Restore the stack pointer from the saved VLA location LOC.
   A zero LOC means there is nothing to restore. */
static void vla_restore(int loc)
{
    if (!loc)
        return;
    gen_vla_sp_restore(loc);
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void vla_leave(struct scope *o)
|
2019-06-22 19:18:54 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
struct scope *c = cur_scope, *v = NULL;
|
2021-02-03 11:30:11 +08:00
|
|
|
for (; c != o && c; c = c->prev)
|
|
|
|
if (c->vla.num)
|
|
|
|
v = c;
|
|
|
|
if (v)
|
2021-10-22 13:39:54 +08:00
|
|
|
vla_restore(v->vla.locorig);
|
2019-06-22 19:18:54 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ------------------------------------------------------------------------- */
|
|
|
|
/* local scopes */
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void new_scope(struct scope *o)
|
2019-06-22 19:18:54 +08:00
|
|
|
{
|
|
|
|
/* copy and link previous scope */
|
2021-10-22 13:39:54 +08:00
|
|
|
*o = *cur_scope;
|
|
|
|
o->prev = cur_scope;
|
|
|
|
cur_scope = o;
|
|
|
|
cur_scope->vla.num = 0;
|
2019-06-22 19:18:54 +08:00
|
|
|
|
|
|
|
/* record local declaration stack position */
|
2021-10-22 13:39:54 +08:00
|
|
|
o->lstk = local_stack;
|
|
|
|
o->llstk = local_label_stack;
|
|
|
|
++local_scope;
|
2020-05-05 20:47:00 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
if (debug_modes)
|
|
|
|
tcc_debug_stabn(tcc_state, N_LBRAC, ind - func_ind);
|
2019-06-22 19:18:54 +08:00
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Leave scope O and return to its enclosing scope: undo VLA stack
   allocations, run block cleanups, and pop locally declared labels and
   symbols.  IS_EXPR is nonzero when finishing a GNU statement
   expression ("({ ... })"). */
static void prev_scope(struct scope *o, int is_expr)
{
    vla_leave(o->prev);

    /* only emit cleanup code if this scope actually registered some */
    if (o->cl.s != o->prev->cl.s)
        block_cleanup(o->prev);

    /* pop locally defined labels */
    label_pop(&local_label_stack, o->llstk, is_expr);

    /* In the is_expr case (a statement expression is finished here),
       vtop might refer to symbols on the local_stack.  Either via the
       type or via vtop->sym.  We can't pop those nor any that in turn
       might be referred to.  To make it easier we don't roll back
       any symbols in that case; some upper level call to block() will
       do that.  We do have to remove such symbols from the lookup
       tables, though.  sym_pop will do that. */

    /* pop locally defined symbols */
    pop_local_syms(o->lstk, is_expr);
    cur_scope = o->prev;
    --local_scope;

    if (debug_modes)
        tcc_debug_stabn(tcc_state, N_RBRAC, ind - func_ind);
}
|
|
|
|
|
|
|
|
/* leave a scope via break/continue(/goto) */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void leave_scope(struct scope *o)
|
2019-06-22 19:18:54 +08:00
|
|
|
{
|
|
|
|
if (!o)
|
2019-04-29 19:53:07 +08:00
|
|
|
return;
|
2021-10-22 13:39:54 +08:00
|
|
|
try_call_scope_cleanup(o->cl.s);
|
|
|
|
vla_leave(o);
|
2019-06-22 19:18:54 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ------------------------------------------------------------------------- */
|
|
|
|
/* call block from 'for do while' loops */
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
static void lblock(int *bsym, int *csym)
|
2019-06-22 19:18:54 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
struct scope *lo = loop_scope, *co = cur_scope;
|
2019-06-22 19:18:54 +08:00
|
|
|
int *b = co->bsym, *c = co->csym;
|
|
|
|
if (csym) {
|
|
|
|
co->csym = csym;
|
2021-10-22 13:39:54 +08:00
|
|
|
loop_scope = co;
|
2019-06-22 19:18:54 +08:00
|
|
|
}
|
|
|
|
co->bsym = bsym;
|
2021-10-22 13:39:54 +08:00
|
|
|
block(0);
|
2019-06-22 19:18:54 +08:00
|
|
|
co->bsym = b;
|
|
|
|
if (csym) {
|
|
|
|
co->csym = c;
|
2021-10-22 13:39:54 +08:00
|
|
|
loop_scope = lo;
|
2019-04-29 19:53:07 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Parse one statement (or a compound statement) and generate code for
   it.  IS_EXPR is nonzero when parsing a GNU statement expression, in
   which case the value of the last expression statement is kept on the
   value stack as the result. */
static void block(int is_expr)
{
    int a, b, c, d, e, t;
    struct scope o;
    Sym *s;

    if (is_expr) {
        /* default return value is (void) */
        vpushi(0);
        vtop->type.t = VT_VOID;
    }

again:
    t = tok;
    /* If the token carries a value, next() might destroy it. Only with
       invalid code such as f(){"123"4;} */
    if (TOK_HAS_VALUE(t))
        goto expr;
    next();

    if (debug_modes)
        tcc_tcov_check_line (tcc_state, 0), tcc_tcov_block_begin (tcc_state);

    if (t == TOK_IF) {
        /* if/else: test, conditional jump over the then-branch */
        //new_scope(&o); //?? breaks tests2.122
        skip('(');
        gexpr();
        skip(')');
        a = gvtst(1, 0);
        block(0);
        if (tok == TOK_ELSE) {
            d = gjmp(0);
            gsym(a);
            next();
            block(0);
            gsym(d); /* patch else jmp */
        } else {
            gsym(a);
        }
        //prev_scope(&o,0); //?? breaks tests2.122

    } else if (t == TOK_WHILE) {
        /* while: d = loop top, a = exit list, b = continue list */
        new_scope(&o);
        d = gind();
        skip('(');
        gexpr();
        skip(')');
        a = gvtst(1, 0);
        b = 0;
        lblock(&a, &b);
        gjmp_addr(d);
        gsym_addr(b, d);  /* continue jumps back to the test */
        gsym(a);
        prev_scope(&o,0);
    } else if (t == '{') {
        /* compound statement: new declaration scope */
        new_scope(&o);

        /* handle local labels declarations */
        while (tok == TOK_LABEL) {
            do {
                next();
                if (tok < TOK_UIDENT)
                    expect("label identifier");
                label_push(&local_label_stack, tok, LABEL_DECLARED);
                next();
            } while (tok == ',');
            skip(';');
        }

        while (tok != '}') {
            decl(VT_LOCAL);
            if (tok != '}') {
                /* discard the previous statement-expression value
                   before parsing the next statement */
                if (is_expr)
                    vpop();
                block(is_expr);
            }
        }

        prev_scope(&o, is_expr);
        if (local_scope)
            next();
        else if (!nocode_wanted)
            /* end of a function body: verify the return path */
            check_func_return();

    } else if (t == TOK_RETURN) {
        /* b: nonzero when the function returns a value */
        b = (func_vt.t & VT_BTYPE) != VT_VOID;
        if (tok != ';') {
            gexpr();
            if (b) {
                gen_assign_cast(&func_vt);
            } else {
                if (vtop->type.t != VT_VOID)
                    tcc_warning("void function returns a value");
                vtop--;
            }
        } else if (b) {
            tcc_warning("'return' with no value");
            b = 0;
        }
        /* run all cleanups/VLA restores up to function scope */
        leave_scope(root_scope);
        if (b)
            gfunc_return(&func_vt);
        skip(';');
        /* jump unless last stmt in top-level block */
        if (tok != '}' || local_scope != 1)
            rsym = gjmp(rsym);
        if (debug_modes)
            tcc_tcov_block_end (tcc_state, -1);
        CODE_OFF();

    } else if (t == TOK_BREAK) {
        /* compute jump */
        if (!cur_scope->bsym)
            tcc_error("cannot break");
        /* leave up to the construct being broken out of: either the
           innermost switch or the innermost loop */
        if (cur_switch && cur_scope->bsym == cur_switch->bsym)
            leave_scope(cur_switch->scope);
        else
            leave_scope(loop_scope);
        *cur_scope->bsym = gjmp(*cur_scope->bsym);
        skip(';');

    } else if (t == TOK_CONTINUE) {
        /* compute jump */
        if (!cur_scope->csym)
            tcc_error("cannot continue");
        leave_scope(loop_scope);
        *cur_scope->csym = gjmp(*cur_scope->csym);
        skip(';');

    } else if (t == TOK_FOR) {
        /* for: c = test addr, d = jump-back target (test or increment),
           a = exit list, b = continue list */
        new_scope(&o);

        skip('(');
        if (tok != ';') {
            /* c99 for-loop init decl? */
            if (!decl(VT_JMP)) {
                /* no, regular for-loop init expr */
                gexpr();
                vpop();
            }
        }
        skip(';');
        a = b = 0;
        c = d = gind();
        if (tok != ';') {
            gexpr();
            a = gvtst(1, 0);
        }
        skip(';');
        if (tok != ')') {
            /* increment expression: emitted after the test, body jumps
               back to it (d), it jumps back to the test (c) */
            e = gjmp(0);
            d = gind();
            gexpr();
            vpop();
            gjmp_addr(c);
            gsym(e);
        }
        skip(')');
        lblock(&a, &b);
        gjmp_addr(d);
        gsym_addr(b, d);
        gsym(a);
        prev_scope(&o, 0);

    } else if (t == TOK_DO) {
        /* do/while: d = loop top, test at the bottom */
        new_scope(&o);
        a = b = 0;
        d = gind();
        lblock(&a, &b);
        gsym(b);  /* continue lands on the test */
        skip(TOK_WHILE);
        skip('(');
        gexpr();
        skip(')');
        skip(';');
        prev_scope(&o,0);
        c = gvtst(0, 0);
        gsym_addr(c, d);
        gsym(a);

    } else if (t == TOK_SWITCH) {
        struct switch_t *sw;

        new_scope(&o);
        sw = tcc_mallocz(sizeof *sw);
        sw->bsym = &a;
        sw->scope = cur_scope;
        sw->prev = cur_switch;
        sw->nocode_wanted = nocode_wanted;
        cur_switch = sw;

        skip('(');
        gexpr();
        skip(')');
        sw->sv = *vtop--; /* save switch value */

        a = 0;
        b = gjmp(0); /* jump to first case */
        lblock(&a, NULL);
        a = gjmp(a); /* add implicit break */
        /* case lookup */
        gsym(b);

        if (sw->nocode_wanted)
            goto skip_switch;
        /* sort cases and reject overlaps; comparison signedness must
           match the switch expression's signedness */
        if (sw->sv.type.t & VT_UNSIGNED)
            qsort(sw->p, sw->n, sizeof(void*), case_cmpu);
        else
            qsort(sw->p, sw->n, sizeof(void*), case_cmpi);
        for (b = 1; b < sw->n; b++)
            if (sw->sv.type.t & VT_UNSIGNED
                ? (uint64_t)sw->p[b - 1]->v2 >= (uint64_t)sw->p[b]->v1
                : sw->p[b - 1]->v2 >= sw->p[b]->v1)
                tcc_error("duplicate case value");
        /* emit the dispatch code on the saved switch value */
        vpushv(&sw->sv);
        gv(RC_INT);
        d = 0, gcase(sw->p, sw->n, &d);
        vpop();
        if (sw->def_sym)
            gsym_addr(d, sw->def_sym);
        else
            gsym(d);
    skip_switch:
        /* break label */
        gsym(a);

        dynarray_reset(&sw->p, &sw->n);
        cur_switch = sw->prev;
        tcc_free(sw);
        prev_scope(&o,0);

    } else if (t == TOK_CASE) {
        struct case_t *cr = tcc_malloc(sizeof(struct case_t));
        if (!cur_switch)
            expect("switch");
        cr->v1 = cr->v2 = expr_const64();
        /* GNU case range: case LO ... HI: */
        if (gnu_ext && tok == TOK_DOTS) {
            next();
            cr->v2 = expr_const64();
            if ((!(cur_switch->sv.type.t & VT_UNSIGNED) && cr->v2 < cr->v1)
                || (cur_switch->sv.type.t & VT_UNSIGNED && (uint64_t)cr->v2 < (uint64_t)cr->v1))
                tcc_warning("empty case range");
        }
        /* case and default are unreachable from a switch under nocode_wanted */
        if (!cur_switch->nocode_wanted)
            cr->sym = gind();
        dynarray_add(&cur_switch->p, &cur_switch->n, cr);
        skip(':');
        is_expr = 0;
        goto block_after_label;

    } else if (t == TOK_DEFAULT) {
        if (!cur_switch)
            expect("switch");
        if (cur_switch->def_sym)
            tcc_error("too many 'default'");
        cur_switch->def_sym = cur_switch->nocode_wanted ? 1 : gind();
        skip(':');
        is_expr = 0;
        goto block_after_label;

    } else if (t == TOK_GOTO) {
        /* leaving via goto: undo any VLA allocations of this scope */
        if (cur_scope->vla.num)
            vla_restore(cur_scope->vla.locorig);
        if (tok == '*' && gnu_ext) {
            /* computed goto */
            next();
            gexpr();
            if ((vtop->type.t & VT_BTYPE) != VT_PTR)
                expect("pointer");
            ggoto();

        } else if (tok >= TOK_UIDENT) {
            s = label_find(tok);
            /* put forward definition if needed */
            if (!s)
                s = label_push(&global_label_stack, tok, LABEL_FORWARD);
            else if (s->r == LABEL_DECLARED)
                s->r = LABEL_FORWARD;

            if (s->r & LABEL_FORWARD) {
                /* start new goto chain for cleanups, linked via label->next */
                if (cur_scope->cl.s && !nocode_wanted) {
                    sym_push2(&pending_gotos, SYM_FIELD, 0, cur_scope->cl.n);
                    pending_gotos->prev_tok = s;
                    s = sym_push2(&s->next, SYM_FIELD, 0, 0);
                    pending_gotos->next = s;
                }
                s->jnext = gjmp(s->jnext);
            } else {
                /* backward goto: target known, emit cleanups then jump */
                try_call_cleanup_goto(s->cleanupstate);
                gjmp_addr(s->jnext);
            }
            next();

        } else {
            expect("label identifier");
        }
        skip(';');

    } else if (t == TOK_ASM1 || t == TOK_ASM2 || t == TOK_ASM3) {
        asm_instr();

    } else {
        if (tok == ':' && t >= TOK_UIDENT) {
            /* label case */
            next();
            s = label_find(t);
            if (s) {
                if (s->r == LABEL_DEFINED)
                    tcc_error("duplicate label '%s'", get_tok_str(s->v, NULL));
                s->r = LABEL_DEFINED;
                if (s->next) {
                    Sym *pcl; /* pending cleanup goto */
                    for (pcl = s->next; pcl; pcl = pcl->prev)
                        gsym(pcl->jnext);
                    sym_pop(&s->next, NULL, 0);
                } else
                    gsym(s->jnext);
            } else {
                s = label_push(&global_label_stack, t, LABEL_DEFINED);
            }
            s->jnext = gind();
            s->cleanupstate = cur_scope->cl.s;

    block_after_label:
              {
                /* Accept attributes after labels (e.g. 'unused') */
                AttributeDef ad_tmp;
                parse_attribute(&ad_tmp);
              }
            if (debug_modes)
                tcc_tcov_reset_ind(tcc_state);
            vla_restore(cur_scope->vla.loc);
            /* a label must be followed by a statement */
            if (tok != '}')
                goto again;
            /* we accept this, but it is a mistake */
            tcc_warning_c(warn_all)("deprecated use of label at end of compound statement");

        } else {
            /* expression case */
            if (t != ';') {
                unget_tok(t);
    expr:
                if (is_expr) {
                    /* replace the previous statement-expression value */
                    vpop();
                    gexpr();
                } else {
                    gexpr();
                    vpop();
                }
                skip(';');
            }
        }
    }

    if (debug_modes)
        tcc_tcov_check_line (tcc_state, 0), tcc_tcov_block_end (tcc_state, 0);
}
|
|
|
|
|
2017-03-07 04:43:48 +08:00
|
|
|
/* This skips over a stream of tokens containing balanced {} and ()
|
2017-07-04 00:13:15 +08:00
|
|
|
pairs, stopping at outer ',' ';' and '}' (or matching '}' if we started
|
|
|
|
with a '{'). If STR then allocates and stores the skipped tokens
|
|
|
|
in *STR. This doesn't check if () and {} are nested correctly,
|
|
|
|
i.e. "({)}" is accepted. */
|
2021-10-22 13:39:54 +08:00
|
|
|
/* This skips over a stream of tokens containing balanced {} and ()
   pairs, stopping at outer ',' ';' and '}' (or matching '}' if we
   started with a '{').  If STR then allocates and stores the skipped
   tokens in *STR.  This doesn't check if () and {} are nested
   correctly, i.e. "({)}" is accepted. */
static void skip_or_save_block(TokenString **str)
{
    int started_with_brace = (tok == '{');
    int depth = 0;
    int cur;

    if (str)
        *str = tok_str_alloc();

    for (;;) {
        cur = tok;

        /* at nesting depth 0 any outer separator or closer ends the scan */
        if (depth == 0) {
            if (cur == ',' || cur == ';' || cur == '}'
                || cur == ')' || cur == ']')
                break;
        }

        if (cur == TOK_EOF) {
            /* EOF is only acceptable when not saving and not nested */
            if (str || depth > 0)
                tcc_error("unexpected end of file");
            break;
        }

        if (str)
            tok_str_add_tok(*str);
        next();

        if (cur == '{' || cur == '(' || cur == '[') {
            depth++;
        } else if (cur == '}' || cur == ')' || cur == ']') {
            depth--;
            /* the matching '}' of an initial '{' terminates the scan
               (and has already been consumed/saved) */
            if (depth == 0 && started_with_brace && cur == '}')
                break;
        }
    }

    if (str) {
        /* terminate the saved token string */
        tok_str_add(*str, -1);
        tok_str_add(*str, 0);
    }
}
|
|
|
|
|
2016-07-31 11:43:17 +08:00
|
|
|
#define EXPR_CONST 1
|
|
|
|
#define EXPR_ANY 2
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Parse one initializer element.  EXPR_CONST requires a constant
   expression (static/global initializers); EXPR_ANY accepts any
   assignment expression (automatic initializers). */
static void parse_init_elem(int expr_type)
{
    int saved_global_expr;
    switch(expr_type) {
    case EXPR_CONST:
        /* compound literals must be allocated globally in this case */
        saved_global_expr = global_expr;
        global_expr = 1;
        expr_const1();
        global_expr = saved_global_expr;
        /* NOTE: symbols are accepted, as well as lvalue for anon symbols
           (compound literals). */
        if (((vtop->r & (VT_VALMASK | VT_LVAL)) != VT_CONST
             && ((vtop->r & (VT_SYM|VT_LVAL)) != (VT_SYM|VT_LVAL)
                 || vtop->sym->v < SYM_FIRST_ANOM))
#ifdef TCC_TARGET_PE
                 /* dllimport addresses are not link-time constants */
                 || ((vtop->r & VT_SYM) && vtop->sym->a.dllimport)
#endif
            )
            tcc_error("initializer element is not constant");
        break;
    case EXPR_ANY:
        expr_eq();
        break;
    }
}
|
|
|
|
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
#if 1
/* Hard sanity check that an initializer write at 'offset' stays within
   the space reserved for the object: for section (static) data the
   section's current data_offset, for stack objects the local frame
   area p->local_offset.  Checks are skipped when no data/code is
   wanted.  NOTE(review): per the commit history this check is meant
   to be temporary. */
static void init_assert(init_params *p, int offset)
{
    if (p->sec ? !NODATA_WANTED && offset > p->sec->data_offset
        : !nocode_wanted && offset > p->local_offset)
        tcc_internal_error("initializer overflow");
}
#else
/* checking disabled: compile the calls away */
#define init_assert(sec, offset)
#endif
|
|
|
|
|
2017-05-06 11:28:13 +08:00
|
|
|
/* put zeros for variable based init */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void init_putz(init_params *p, unsigned long c, int size)
|
2017-05-06 11:28:13 +08:00
|
|
|
{
|
2021-10-22 13:39:54 +08:00
|
|
|
init_assert(p, c + size);
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
if (p->sec) {
|
2017-05-06 11:28:13 +08:00
|
|
|
/* nothing to do because globals are already set to zero */
|
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
vpush_helper_func(TOK_memset);
|
|
|
|
vseti(VT_LOCAL, c);
|
2017-05-06 11:28:13 +08:00
|
|
|
#ifdef TCC_TARGET_ARM
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushs(size);
|
|
|
|
vpushi(0);
|
2017-05-06 11:28:13 +08:00
|
|
|
#else
|
2021-10-22 13:39:54 +08:00
|
|
|
vpushi(0);
|
|
|
|
vpushs(size);
|
2017-05-06 11:28:13 +08:00
|
|
|
#endif
|
2021-10-22 13:39:54 +08:00
|
|
|
gfunc_call(3);
|
2017-05-06 11:28:13 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-18 10:26:19 +08:00
|
|
|
/* flags for decl_initializer()/decl_designator() */
#define DIF_FIRST 1 /* first (outermost) initializer for the object */
#define DIF_SIZE_ONLY 2 /* only determine size (e.g. flexible arrays), emit nothing */
#define DIF_HAVE_ELEM 4 /* an element value was already parsed onto the vstack */
#define DIF_CLEAR 8 /* target memory is known zeroed already -- cleared when an element is re-initialized (see decl_designator) */
|
|
|
|
|
|
|
|
/* delete relocations for specified range c ... c + size. Unfortunatly
|
|
|
|
in very special cases, relocations may occur unordered */
|
|
|
|
static void decl_design_delrels(Section *sec, int c, int size)
|
|
|
|
{
|
|
|
|
ElfW_Rel *rel, *rel2, *rel_end;
|
|
|
|
if (!sec || !sec->reloc)
|
|
|
|
return;
|
|
|
|
rel = rel2 = (ElfW_Rel*)sec->reloc->data;
|
|
|
|
rel_end = (ElfW_Rel*)(sec->reloc->data + sec->reloc->data_offset);
|
|
|
|
while (rel < rel_end) {
|
|
|
|
if (rel->r_offset >= c && rel->r_offset < c + size) {
|
|
|
|
sec->reloc->data_offset -= sizeof *rel;
|
|
|
|
} else {
|
|
|
|
if (rel2 != rel)
|
|
|
|
memcpy(rel2, rel, sizeof *rel);
|
|
|
|
++rel2;
|
|
|
|
}
|
|
|
|
++rel;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* Account for 'index' being initialized in array type 'ref'.  If it
   is the flexible/unsized array currently being initialized
   (p->flex_array_ref), grow its recorded element count; otherwise an
   unsized array (c < 0) is an error in this context. */
static void decl_design_flex(init_params *p, Sym *ref, int index)
{
    if (ref != p->flex_array_ref) {
        if (ref->c < 0)
            tcc_error("flexible array has zero size in this context");
        return;
    }
    if (index >= ref->c)
        ref->c = index + 1;
}
|
2019-03-18 10:26:19 +08:00
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* t is the array or struct type. c is the array or struct
   address. cur_field is the pointer to the current
   field, for arrays the 'c' member contains the current start
   index. 'flags' is as in decl_initializer.
   'al' contains the already initialized length of the
   current container (starting at c). This returns the new length of that. */
static int decl_designator(init_params *p, CType *type, unsigned long c,
                           Sym **cur_field, int flags, int al)
{
    Sym *s, *f;
    int index, index_last, align, l, nb_elems, elem_size;
    unsigned long corig = c;  /* start of the container, to compute al */

    elem_size = 0;
    nb_elems = 1;  /* > 1 only for GNU array range designators [a ... b] */

    /* an element value is already on the vstack: no designator to parse */
    if (flags & DIF_HAVE_ELEM)
        goto no_designator;

    /* GNU extension: obsolete 'fieldname:' designator syntax */
    if (gnu_ext && tok >= TOK_UIDENT) {
        l = tok, next();
        if (tok == ':')
            goto struct_field;
        unget_tok(l);
    }

    /* NOTE: we only support ranges for last designator */
    while (nb_elems == 1 && (tok == '[' || tok == '.')) {
        if (tok == '[') {
            /* array designator: [index] or GNU range [index ... index_last] */
            if (!(type->t & VT_ARRAY))
                expect("array type");
            next();
            index = index_last = expr_const();
            if (tok == TOK_DOTS && gnu_ext) {
                next();
                index_last = expr_const();
            }
            skip(']');
            s = type->ref;
            /* may grow a flexible array to cover index_last */
            decl_design_flex(p, s, index_last);
            if (index < 0 || index_last >= s->c || index_last < index)
	        tcc_error("index exceeds array bounds or range is empty");
            if (cur_field)
		(*cur_field)->c = index_last;  /* continue after the range */
            type = pointed_type(type);
            elem_size = type_size(type, &align);
            c += index * elem_size;
            nb_elems = index_last - index + 1;
        } else {
            /* struct/union designator: .field */
            int cumofs;
            next();
            l = tok;
        struct_field:
            next();
            if ((type->t & VT_BTYPE) != VT_STRUCT)
                expect("struct/union type");
            cumofs = 0;
	    f = find_field(type, l, &cumofs);  /* cumofs: offset through anon members */
            if (cur_field)
                *cur_field = f;
	    type = &f->type;
            c += cumofs;
        }
        /* designators past the first one descend into sub-objects */
        cur_field = NULL;
    }
    if (!cur_field) {
        /* a designator was parsed: expect '=' (optional with gnu_ext) */
        if (tok == '=') {
            next();
        } else if (!gnu_ext) {
	    expect("=");
        }
    } else {
    no_designator:
        /* positional initialization: advance through the container */
        if (type->t & VT_ARRAY) {
	    index = (*cur_field)->c;  /* current array index */
	    s = type->ref;
	    decl_design_flex(p, s, index);
            if (index >= s->c)
                tcc_error("too many initializers");
            type = pointed_type(type);
            elem_size = type_size(type, &align);
            c += index * elem_size;
        } else {
            f = *cur_field;
            /* skip anonymous bitfield padding members */
	    while (f && (f->v & SYM_FIRST_ANOM) && (f->type.t & VT_BITFIELD))
	        *cur_field = f = f->next;
            if (!f)
                tcc_error("too many initializers");
	    type = &f->type;
            c += f->c;
        }
    }

    if (!elem_size) /* for structs */
        elem_size = type_size(type, &align);

    /* Using designators the same element can be initialized more
       than once.  In that case we need to delete possibly already
       existing relocations. */
    if (!(flags & DIF_SIZE_ONLY) && c - corig < al) {
	decl_design_delrels(p->sec, c, elem_size * nb_elems);
        flags &= ~DIF_CLEAR; /* mark stack dirty too */
    }

    /* parse/emit the initializer for the first designated element */
    decl_initializer(p, type, c, flags & ~DIF_FIRST);

    /* for range designators, replicate the first element's value into
       the rest of the range */
    if (!(flags & DIF_SIZE_ONLY) && nb_elems > 1) {
        Sym aref = {0};
        CType t1;
        int i;
        if (p->sec || (type->t & VT_ARRAY)) {
            /* make init_putv/vstore believe it were a struct */
            aref.c = elem_size;
            t1.t = VT_STRUCT, t1.ref = &aref;
            type = &t1;
        }
        /* push the source value (first element) ... */
        if (p->sec)
            vpush_ref(type, p->sec, c, elem_size);
        else
	    vset(type, VT_LOCAL|VT_LVAL, c);
        /* ... and store a copy at each remaining slot */
        for (i = 1; i < nb_elems; i++) {
            vdup();
            init_putv(p, type, c + elem_size * i);
        }
        vpop();
    }

    /* return the (possibly grown) initialized length of the container */
    c += nb_elems * elem_size;
    if (c - corig > al)
        al = c - corig;
    return al;
}
|
|
|
|
|
2016-07-24 06:43:49 +08:00
|
|
|
/* store a value or an expression directly in global data or in local array */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void init_putv(init_params *p, CType *type, unsigned long c)
|
2016-07-24 06:43:49 +08:00
|
|
|
{
|
2017-07-09 18:38:59 +08:00
|
|
|
int bt;
|
2016-07-24 06:43:49 +08:00
|
|
|
void *ptr;
|
|
|
|
CType dtype;
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
int size, align;
|
|
|
|
Section *sec = p->sec;
|
2021-01-24 23:20:48 +08:00
|
|
|
uint64_t val;
|
2016-07-24 06:43:49 +08:00
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
dtype = *type;
|
|
|
|
dtype.t &= ~VT_CONSTANT; /* need to do that to avoid false warning */
|
|
|
|
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
size = type_size(type, &align);
|
|
|
|
if (type->t & VT_BITFIELD)
|
|
|
|
size = (BIT_POS(type->t) + BIT_SIZE(type->t) + 7) / 8;
|
2021-10-22 13:39:54 +08:00
|
|
|
init_assert(p, c + size);
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
if (sec) {
|
|
|
|
/* XXX: not portable */
|
|
|
|
/* XXX: generate error if incorrect relocation */
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_assign_cast(&dtype);
|
2009-05-06 02:18:10 +08:00
|
|
|
bt = type->t & VT_BTYPE;
|
2017-07-16 18:10:00 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((vtop->r & VT_SYM)
|
2017-07-16 18:10:00 +08:00
|
|
|
&& bt != VT_PTR
|
|
|
|
&& (bt != (PTR_SIZE == 8 ? VT_LLONG : VT_INT)
|
|
|
|
|| (type->t & VT_BITFIELD))
|
2021-10-22 13:39:54 +08:00
|
|
|
&& !((vtop->r & VT_CONST) && vtop->sym->v >= SYM_FIRST_ANOM)
|
2017-07-16 18:10:00 +08:00
|
|
|
)
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("initializer element is not computable at load time");
|
2017-07-16 18:10:00 +08:00
|
|
|
|
|
|
|
if (NODATA_WANTED) {
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop--;
|
2017-07-16 18:10:00 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
ptr = sec->data + c;
|
2021-10-22 13:39:54 +08:00
|
|
|
val = vtop->c.i;
|
2017-07-16 18:10:00 +08:00
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* XXX: make code faster ? */
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((vtop->r & (VT_SYM|VT_CONST)) == (VT_SYM|VT_CONST) &&
|
|
|
|
vtop->sym->v >= SYM_FIRST_ANOM &&
|
2017-05-08 12:38:09 +08:00
|
|
|
/* XXX This rejects compound literals like
|
2016-07-24 06:43:49 +08:00
|
|
|
'(void *){ptr}'. The problem is that '&sym' is
|
|
|
|
represented the same way, which would be ruled out
|
|
|
|
by the SYM_FIRST_ANOM check above, but also '"string"'
|
|
|
|
in 'char *p = "string"' is represented the same
|
|
|
|
with the type being VT_PTR and the symbol being an
|
|
|
|
anonymous one. That is, there's no difference in vtop
|
|
|
|
between '(void *){x}' and '&(void *){x}'. Ignore
|
|
|
|
pointer typed entities here. Hopefully no real code
|
2020-06-21 03:56:53 +08:00
|
|
|
will ever use compound literals with scalar type. */
|
2021-10-22 13:39:54 +08:00
|
|
|
(vtop->type.t & VT_BTYPE) != VT_PTR) {
|
2016-07-24 06:43:49 +08:00
|
|
|
/* These come from compound literals, memcpy stuff over. */
|
|
|
|
Section *ssec;
|
2017-11-27 11:03:03 +08:00
|
|
|
ElfSym *esym;
|
2016-10-04 01:21:10 +08:00
|
|
|
ElfW_Rel *rel;
|
2021-10-22 13:39:54 +08:00
|
|
|
esym = elfsym(vtop->sym);
|
|
|
|
ssec = tcc_state->sections[esym->st_shndx];
|
|
|
|
memmove (ptr, ssec->data + esym->st_value + (int)vtop->c.i, size);
|
2016-10-04 01:21:10 +08:00
|
|
|
if (ssec->reloc) {
|
|
|
|
/* We need to copy over all memory contents, and that
|
|
|
|
includes relocations. Use the fact that relocs are
|
|
|
|
created it order, so look from the end of relocs
|
|
|
|
until we hit one before the copied region. */
|
2021-02-13 07:35:29 +08:00
|
|
|
unsigned long relofs = ssec->reloc->data_offset;
|
|
|
|
while (relofs >= sizeof(*rel)) {
|
|
|
|
relofs -= sizeof(*rel);
|
|
|
|
rel = (ElfW_Rel*)(ssec->reloc->data + relofs);
|
2016-10-04 01:21:10 +08:00
|
|
|
if (rel->r_offset >= esym->st_value + size)
|
|
|
|
continue;
|
|
|
|
if (rel->r_offset < esym->st_value)
|
|
|
|
break;
|
|
|
|
put_elf_reloca(symtab_section, sec,
|
|
|
|
c + rel->r_offset - esym->st_value,
|
|
|
|
ELFW(R_TYPE)(rel->r_info),
|
|
|
|
ELFW(R_SYM)(rel->r_info),
|
2017-05-13 14:59:06 +08:00
|
|
|
#if PTR_SIZE == 8
|
2016-10-04 01:21:10 +08:00
|
|
|
rel->r_addend
|
|
|
|
#else
|
|
|
|
0
|
|
|
|
#endif
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
2016-07-24 06:43:49 +08:00
|
|
|
} else {
|
2017-07-09 18:38:59 +08:00
|
|
|
if (type->t & VT_BITFIELD) {
|
|
|
|
int bit_pos, bit_size, bits, n;
|
|
|
|
unsigned char *p, v, m;
|
2021-10-22 13:39:54 +08:00
|
|
|
bit_pos = BIT_POS(vtop->type.t);
|
|
|
|
bit_size = BIT_SIZE(vtop->type.t);
|
2017-07-09 18:38:59 +08:00
|
|
|
p = (unsigned char*)ptr + (bit_pos >> 3);
|
|
|
|
bit_pos &= 7, bits = 0;
|
|
|
|
while (bit_size) {
|
|
|
|
n = 8 - bit_pos;
|
|
|
|
if (n > bit_size)
|
|
|
|
n = bit_size;
|
2021-01-24 23:20:48 +08:00
|
|
|
v = val >> bits << bit_pos;
|
2017-07-09 18:38:59 +08:00
|
|
|
m = ((1 << n) - 1) << bit_pos;
|
|
|
|
*p = (*p & ~m) | (v & m);
|
|
|
|
bits += n, bit_size -= n, bit_pos = 0, ++p;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
switch(bt) {
|
2016-07-24 06:43:49 +08:00
|
|
|
case VT_BOOL:
|
2021-01-24 23:20:48 +08:00
|
|
|
*(char *)ptr = val != 0;
|
|
|
|
break;
|
2016-07-24 06:43:49 +08:00
|
|
|
case VT_BYTE:
|
2021-01-24 23:20:48 +08:00
|
|
|
*(char *)ptr = val;
|
2016-07-24 06:43:49 +08:00
|
|
|
break;
|
|
|
|
case VT_SHORT:
|
2021-01-24 23:20:48 +08:00
|
|
|
write16le(ptr, val);
|
2016-07-24 06:43:49 +08:00
|
|
|
break;
|
2017-03-12 12:25:09 +08:00
|
|
|
case VT_FLOAT:
|
2021-01-24 23:20:48 +08:00
|
|
|
write32le(ptr, val);
|
2017-03-12 12:25:09 +08:00
|
|
|
break;
|
2016-07-24 06:43:49 +08:00
|
|
|
case VT_DOUBLE:
|
2021-01-24 23:20:48 +08:00
|
|
|
write64le(ptr, val);
|
2016-07-24 06:43:49 +08:00
|
|
|
break;
|
|
|
|
case VT_LDOUBLE:
|
various stuff
win32/Makefile ("for cygwin") removed
- On cygwin, the normal ./configure && make can be used with either
cygwin's "GCC for Win32 Toolchain"
./configure --cross-prefix=i686-w64-mingw32-
or with an existing tcc:
./configure --cc=<old-tccdir>/tcc.exe
tcctest.c:
- exclude test_high_clobbers() on _WIN64 (does not work)
tests2/95_bitfield.c:
- use 'signed char' for ARM (where default 'char' is unsigned)
tests:
- remove -I "expr" diff option to allow tests with
busybox-diff.
libtcc.c, tcc.c:
- removed -iwithprefix option. It is supposed to be
combined with -iprefix which we don't have either.
tccgen.c:
- fix assignments and return of 'void', as in
void f() {
void *p, *q;
*p = *q:
return *p;
}
This appears to be allowed but should do nothing.
tcc.h, libtcc.c, tccpp.c:
- Revert "Introduce VIP sysinclude paths which are always searched first"
This reverts commit 1d5e386b0a78393ac6b670c209a185849ec798a1.
The patch was giving tcc's system includes priority over -I which
is not how it should be.
tccelf.c:
- add DT_TEXTREL tag only if text relocations are actually
used (which is likely not the case on x86_64)
- prepare_dynamic_rel(): avoid relocation of unresolved
(weak) symbols
tccrun.c:
- for HAVE_SELINUX, use two mappings to the same (real) file.
(it was so once except the RX mapping wasn't used at all).
tccpe.c:
- fix relocation constant used for x86_64 (by Andrei E. Warentin)
- #ifndef _WIN32 do "chmod 755 ..." to get runnable exes on cygwin.
tccasm.c:
- keep forward asm labels static, otherwise they will endup
in dynsym eventually.
configure, Makefile:
- mingw32: respect ./configure options --bindir --docdir --libdir
- allow overriding tcc when building libtcc1.a and libtcc.def with
make XTCC=<tcc program to use>
- use $(wildcard ...) for install to allow installing just
a cross compiler for example
make cross-arm
make install
- use name <target>-libtcc1.a
build-tcc.bat:
- add options: -clean, -b bindir
2017-10-12 00:13:43 +08:00
|
|
|
#if defined TCC_IS_NATIVE_387
|
2021-01-24 23:20:48 +08:00
|
|
|
/* Host and target platform may be different but both have x87.
|
|
|
|
On windows, tcc does not use VT_LDOUBLE, except when it is a
|
|
|
|
cross compiler. In this case a mingw gcc as host compiler
|
|
|
|
comes here with 10-byte long doubles, while msvc or tcc won't.
|
|
|
|
tcc itself can still translate by asm.
|
|
|
|
In any case we avoid possibly random bytes 11 and 12.
|
|
|
|
*/
|
|
|
|
if (sizeof (long double) >= 10)
|
2021-10-22 13:39:54 +08:00
|
|
|
memcpy(ptr, &vtop->c.ld, 10);
|
2017-05-13 14:59:06 +08:00
|
|
|
#ifdef __TINYC__
|
|
|
|
else if (sizeof (long double) == sizeof (double))
|
2021-10-22 13:39:54 +08:00
|
|
|
__asm__("fldl %1\nfstpt %0\n" : "=m" (*ptr) : "m" (vtop->c.ld));
|
2017-05-13 14:59:06 +08:00
|
|
|
#endif
|
2021-10-22 13:39:54 +08:00
|
|
|
else if (vtop->c.ld == 0.0)
|
2017-12-13 00:57:20 +08:00
|
|
|
;
|
2017-09-25 00:57:48 +08:00
|
|
|
else
|
2017-05-07 18:41:29 +08:00
|
|
|
#endif
|
2021-01-24 23:20:48 +08:00
|
|
|
/* For other platforms it should work natively, but may not work
|
|
|
|
for cross compilers */
|
2017-09-25 00:57:48 +08:00
|
|
|
if (sizeof(long double) == LDOUBLE_SIZE)
|
2021-10-22 13:39:54 +08:00
|
|
|
memcpy(ptr, &vtop->c.ld, LDOUBLE_SIZE);
|
2017-09-25 00:57:48 +08:00
|
|
|
else if (sizeof(double) == LDOUBLE_SIZE)
|
2021-10-22 13:39:54 +08:00
|
|
|
memcpy(ptr, &vtop->c.ld, LDOUBLE_SIZE);
|
2021-01-09 22:04:46 +08:00
|
|
|
#ifndef TCC_CROSS_TEST
|
2017-09-25 00:57:48 +08:00
|
|
|
else
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("can't cross compile long double constants");
|
2021-01-09 22:04:46 +08:00
|
|
|
#endif
|
2016-07-24 06:43:49 +08:00
|
|
|
break;
|
2021-01-24 23:20:48 +08:00
|
|
|
|
|
|
|
#if PTR_SIZE == 8
|
|
|
|
/* intptr_t may need a reloc too, see tcctest.c:relocation_test() */
|
2016-07-24 06:43:49 +08:00
|
|
|
case VT_LLONG:
|
|
|
|
case VT_PTR:
|
2021-10-22 13:39:54 +08:00
|
|
|
if (vtop->r & VT_SYM)
|
|
|
|
greloca(sec, vtop->sym, c, R_DATA_PTR, val);
|
2021-01-24 23:20:48 +08:00
|
|
|
else
|
|
|
|
write64le(ptr, val);
|
|
|
|
break;
|
|
|
|
case VT_INT:
|
|
|
|
write32le(ptr, val);
|
|
|
|
break;
|
2015-02-22 05:29:03 +08:00
|
|
|
#else
|
2021-01-24 23:20:48 +08:00
|
|
|
case VT_LLONG:
|
|
|
|
write64le(ptr, val);
|
|
|
|
break;
|
|
|
|
case VT_PTR:
|
|
|
|
case VT_INT:
|
2021-10-22 13:39:54 +08:00
|
|
|
if (vtop->r & VT_SYM)
|
|
|
|
greloc(sec, vtop->sym, c, R_DATA_PTR);
|
2021-01-24 23:20:48 +08:00
|
|
|
write32le(ptr, val);
|
|
|
|
break;
|
2015-02-22 05:29:03 +08:00
|
|
|
#endif
|
2016-07-24 06:43:49 +08:00
|
|
|
default:
|
2021-01-24 23:20:48 +08:00
|
|
|
//tcc_internal_error("unexpected type");
|
|
|
|
break;
|
2016-07-24 06:43:49 +08:00
|
|
|
}
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
vtop--;
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
vset(&dtype, VT_LOCAL|VT_LVAL, c);
|
|
|
|
vswap();
|
|
|
|
vstore();
|
|
|
|
vpop();
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* 'type' contains the type and storage info. 'c' is the offset of the
|
|
|
|
object in section 'sec'. If 'sec' is NULL, it means stack based
|
2019-03-18 10:26:19 +08:00
|
|
|
allocation. 'flags & DIF_FIRST' is true if array '{' must be read (multi
|
|
|
|
dimension implicit array init handling). 'flags & DIF_SIZE_ONLY' is true if
|
2009-05-06 02:18:10 +08:00
|
|
|
size only evaluation is wanted (only for arrays). */
|
2021-10-22 13:39:54 +08:00
|
|
|
static void decl_initializer(init_params *p, CType *type, unsigned long c, int flags)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
2020-06-21 03:56:53 +08:00
|
|
|
int len, n, no_oblock, i;
|
2016-08-01 11:30:55 +08:00
|
|
|
int size1, align1;
|
2009-05-06 02:18:10 +08:00
|
|
|
Sym *s, *f;
|
2016-08-01 11:30:55 +08:00
|
|
|
Sym indexsym;
|
2009-05-06 02:18:10 +08:00
|
|
|
CType *t1;
|
|
|
|
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
/* generate line number info */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (debug_modes && !p->sec)
|
2022-05-09 23:02:09 +08:00
|
|
|
tcc_debug_line(tcc_state), tcc_tcov_check_line (tcc_state, 1);
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
if (!(flags & DIF_HAVE_ELEM) && tok != '{' &&
|
2016-08-01 11:30:55 +08:00
|
|
|
/* In case of strings we have special handling for arrays, so
|
|
|
|
don't consume them as initializer value (which would commit them
|
|
|
|
to some anonymous symbol). */
|
2021-10-22 13:39:54 +08:00
|
|
|
tok != TOK_LSTR && tok != TOK_STR &&
|
2021-05-16 00:40:16 +08:00
|
|
|
(!(flags & DIF_SIZE_ONLY)
|
|
|
|
/* a struct may be initialized from a struct of same type, as in
|
|
|
|
struct {int x,y;} a = {1,2}, b = {3,4}, c[] = {a,b};
|
|
|
|
In that case we need to parse the element in order to check
|
|
|
|
it for compatibility below */
|
|
|
|
|| (type->t & VT_BTYPE) == VT_STRUCT)
|
|
|
|
) {
|
|
|
|
int ncw_prev = nocode_wanted;
|
|
|
|
if ((flags & DIF_SIZE_ONLY) && !p->sec)
|
|
|
|
++nocode_wanted;
|
2021-10-22 13:39:54 +08:00
|
|
|
parse_init_elem(!p->sec ? EXPR_ANY : EXPR_CONST);
|
2021-05-16 00:40:16 +08:00
|
|
|
nocode_wanted = ncw_prev;
|
2019-03-18 10:26:19 +08:00
|
|
|
flags |= DIF_HAVE_ELEM;
|
2016-07-31 11:43:17 +08:00
|
|
|
}
|
|
|
|
|
2021-05-16 00:40:16 +08:00
|
|
|
if (type->t & VT_ARRAY) {
|
2009-05-06 02:18:10 +08:00
|
|
|
no_oblock = 1;
|
2021-10-22 13:39:54 +08:00
|
|
|
if (((flags & DIF_FIRST) && tok != TOK_LSTR && tok != TOK_STR) ||
|
|
|
|
tok == '{') {
|
|
|
|
skip('{');
|
2009-05-06 02:18:10 +08:00
|
|
|
no_oblock = 0;
|
|
|
|
}
|
|
|
|
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
s = type->ref;
|
|
|
|
n = s->c;
|
|
|
|
t1 = pointed_type(type);
|
|
|
|
size1 = type_size(t1, &align1);
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* only parse strings here if correct type (otherwise: handle
|
|
|
|
them as ((w)char *) expressions */
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((tok == TOK_LSTR &&
|
2009-05-06 02:18:10 +08:00
|
|
|
#ifdef TCC_TARGET_PE
|
|
|
|
(t1->t & VT_BTYPE) == VT_SHORT && (t1->t & VT_UNSIGNED)
|
|
|
|
#else
|
|
|
|
(t1->t & VT_BTYPE) == VT_INT
|
|
|
|
#endif
|
2021-10-22 13:39:54 +08:00
|
|
|
) || (tok == TOK_STR && (t1->t & VT_BTYPE) == VT_BYTE)) {
|
2017-05-06 11:28:13 +08:00
|
|
|
len = 0;
|
2021-10-22 13:39:54 +08:00
|
|
|
cstr_reset(&initstr);
|
|
|
|
if (size1 != (tok == TOK_STR ? 1 : sizeof(nwchar_t)))
|
|
|
|
tcc_error("unhandled string literal merging");
|
|
|
|
while (tok == TOK_STR || tok == TOK_LSTR) {
|
|
|
|
if (initstr.size)
|
|
|
|
initstr.size -= size1;
|
|
|
|
if (tok == TOK_STR)
|
|
|
|
len += tokc.str.size;
|
2009-05-06 02:18:10 +08:00
|
|
|
else
|
2021-10-22 13:39:54 +08:00
|
|
|
len += tokc.str.size / sizeof(nwchar_t);
|
2020-06-21 03:56:53 +08:00
|
|
|
len--;
|
2021-10-22 13:39:54 +08:00
|
|
|
cstr_cat(&initstr, tokc.str.data, tokc.str.size);
|
|
|
|
next();
|
2020-06-21 03:56:53 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok != ')' && tok != '}' && tok != ',' && tok != ';'
|
|
|
|
&& tok != TOK_EOF) {
|
2020-06-21 03:56:53 +08:00
|
|
|
/* Not a lone literal but part of a bigger expression. */
|
2021-10-22 13:39:54 +08:00
|
|
|
unget_tok(size1 == 1 ? TOK_STR : TOK_LSTR);
|
|
|
|
tokc.str.size = initstr.size;
|
|
|
|
tokc.str.data = initstr.data;
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
goto do_init_array;
|
2020-06-21 03:56:53 +08:00
|
|
|
}
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
decl_design_flex(p, s, len);
|
2020-06-21 03:56:53 +08:00
|
|
|
if (!(flags & DIF_SIZE_ONLY)) {
|
2022-08-18 16:43:28 +08:00
|
|
|
int nb = n, ch;
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
if (len < nb)
|
|
|
|
nb = len;
|
2020-06-21 03:56:53 +08:00
|
|
|
if (len > nb)
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_warning("initializer-string for array is too long");
|
2020-06-21 03:56:53 +08:00
|
|
|
/* in order to go faster for common case (char
|
|
|
|
string in global variable, we handle it
|
|
|
|
specifically */
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
if (p->sec && size1 == 1) {
|
2021-10-22 13:39:54 +08:00
|
|
|
init_assert(p, c + nb);
|
2020-06-21 03:56:53 +08:00
|
|
|
if (!NODATA_WANTED)
|
2021-10-22 13:39:54 +08:00
|
|
|
memcpy(p->sec->data + c, initstr.data, nb);
|
2020-06-21 03:56:53 +08:00
|
|
|
} else {
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
for(i=0;i<n;i++) {
|
|
|
|
if (i >= nb) {
|
|
|
|
/* only add trailing zero if enough storage (no
|
|
|
|
warning in this case since it is standard) */
|
|
|
|
if (flags & DIF_CLEAR)
|
|
|
|
break;
|
|
|
|
if (n - i >= 4) {
|
2021-10-22 13:39:54 +08:00
|
|
|
init_putz(p, c + i * size1, (n - i) * size1);
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
break;
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
ch = 0;
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
} else if (size1 == 1)
|
2021-10-22 13:39:54 +08:00
|
|
|
ch = ((unsigned char *)initstr.data)[i];
|
2020-06-21 03:56:53 +08:00
|
|
|
else
|
2021-10-22 13:39:54 +08:00
|
|
|
ch = ((nwchar_t *)initstr.data)[i];
|
|
|
|
vpushi(ch);
|
|
|
|
init_putv(p, t1, c + i * size1);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
|
|
|
|
do_init_array:
|
2016-08-01 11:30:55 +08:00
|
|
|
indexsym.c = 0;
|
|
|
|
f = &indexsym;
|
|
|
|
|
|
|
|
do_init_list:
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
/* zero memory once in advance */
|
|
|
|
if (!(flags & (DIF_CLEAR | DIF_SIZE_ONLY))) {
|
2021-10-22 13:39:54 +08:00
|
|
|
init_putz(p, c, n*size1);
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
flags |= DIF_CLEAR;
|
|
|
|
}
|
|
|
|
|
2017-05-06 11:28:13 +08:00
|
|
|
len = 0;
|
2022-07-01 21:37:23 +08:00
|
|
|
/* GNU extension: if the initializer is empty for a flex array,
|
|
|
|
it's size is zero. We won't enter the loop, so set the size
|
|
|
|
now. */
|
|
|
|
decl_design_flex(p, s, len);
|
2021-10-22 13:39:54 +08:00
|
|
|
while (tok != '}' || (flags & DIF_HAVE_ELEM)) {
|
|
|
|
len = decl_designator(p, type, c, &f, flags, len);
|
2019-03-18 10:26:19 +08:00
|
|
|
flags &= ~DIF_HAVE_ELEM;
|
2016-08-01 11:30:55 +08:00
|
|
|
if (type->t & VT_ARRAY) {
|
2017-05-06 11:28:13 +08:00
|
|
|
++indexsym.c;
|
2016-08-01 11:30:55 +08:00
|
|
|
/* special test for multi dimensional arrays (may not
|
|
|
|
be strictly correct if designators are used at the
|
|
|
|
same time) */
|
2017-05-06 11:28:13 +08:00
|
|
|
if (no_oblock && len >= n*size1)
|
2016-08-01 11:30:55 +08:00
|
|
|
break;
|
|
|
|
} else {
|
2017-07-09 18:34:11 +08:00
|
|
|
if (s->type.t == VT_UNION)
|
2017-05-06 11:28:13 +08:00
|
|
|
f = NULL;
|
|
|
|
else
|
|
|
|
f = f->next;
|
2016-08-01 11:30:55 +08:00
|
|
|
if (no_oblock && f == NULL)
|
|
|
|
break;
|
|
|
|
}
|
2017-05-06 11:28:13 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok == '}')
|
2016-08-01 11:30:55 +08:00
|
|
|
break;
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(',');
|
2016-08-01 11:30:55 +08:00
|
|
|
}
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2016-08-01 11:30:55 +08:00
|
|
|
if (!no_oblock)
|
2021-10-22 13:39:54 +08:00
|
|
|
skip('}');
|
2021-05-16 00:40:16 +08:00
|
|
|
|
|
|
|
} else if ((flags & DIF_HAVE_ELEM)
|
|
|
|
/* Use i_c_parameter_t, to strip toplevel qualifiers.
|
|
|
|
The source type might have VT_CONSTANT set, which is
|
|
|
|
of course assignable to non-const elements. */
|
|
|
|
&& is_compatible_unqualified_types(type, &vtop->type)) {
|
|
|
|
goto one_elem;
|
|
|
|
|
2016-07-31 12:18:45 +08:00
|
|
|
} else if ((type->t & VT_BTYPE) == VT_STRUCT) {
|
2009-05-06 02:18:10 +08:00
|
|
|
no_oblock = 1;
|
2021-10-22 13:39:54 +08:00
|
|
|
if ((flags & DIF_FIRST) || tok == '{') {
|
|
|
|
skip('{');
|
2009-05-06 02:18:10 +08:00
|
|
|
no_oblock = 0;
|
|
|
|
}
|
|
|
|
s = type->ref;
|
2010-06-15 23:02:09 +08:00
|
|
|
f = s->next;
|
2009-05-06 02:18:10 +08:00
|
|
|
n = s->c;
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
size1 = 1;
|
2016-08-01 11:30:55 +08:00
|
|
|
goto do_init_list;
|
2021-05-16 00:40:16 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
} else if (tok == '{') {
|
2019-03-18 10:26:19 +08:00
|
|
|
if (flags & DIF_HAVE_ELEM)
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(';');
|
|
|
|
next();
|
|
|
|
decl_initializer(p, type, c, flags & ~DIF_HAVE_ELEM);
|
|
|
|
skip('}');
|
2021-05-16 00:40:16 +08:00
|
|
|
|
|
|
|
} else one_elem: if ((flags & DIF_SIZE_ONLY)) {
|
2016-08-01 11:30:55 +08:00
|
|
|
/* If we supported only ISO C we wouldn't have to accept calling
|
2019-03-18 10:26:19 +08:00
|
|
|
this on anything than an array if DIF_SIZE_ONLY (and even then
|
2016-08-01 11:30:55 +08:00
|
|
|
only on the outermost level, so no recursion would be needed),
|
|
|
|
because initializing a flex array member isn't supported.
|
|
|
|
But GNU C supports it, so we need to recurse even into
|
2019-03-18 10:26:19 +08:00
|
|
|
subfields of structs and arrays when DIF_SIZE_ONLY is set. */
|
2009-05-06 02:18:10 +08:00
|
|
|
/* just skip expression */
|
2021-05-16 00:40:16 +08:00
|
|
|
if (flags & DIF_HAVE_ELEM)
|
|
|
|
vpop();
|
|
|
|
else
|
|
|
|
skip_or_save_block(NULL);
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
2019-03-18 10:26:19 +08:00
|
|
|
if (!(flags & DIF_HAVE_ELEM)) {
|
2016-07-31 11:43:17 +08:00
|
|
|
/* This should happen only when we haven't parsed
|
|
|
|
the init element above for fear of committing a
|
|
|
|
string constant to memory too early. */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok != TOK_STR && tok != TOK_LSTR)
|
|
|
|
expect("string constant");
|
|
|
|
parse_init_elem(!p->sec ? EXPR_ANY : EXPR_CONST);
|
2016-07-31 11:43:17 +08:00
|
|
|
}
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
if (!p->sec && (flags & DIF_CLEAR) /* container was already zero'd */
|
2021-10-22 13:39:54 +08:00
|
|
|
&& (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST
|
|
|
|
&& vtop->c.i == 0
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
&& btype_size(type->t & VT_BTYPE) /* not for fp constants */
|
|
|
|
)
|
2021-10-22 13:39:54 +08:00
|
|
|
vpop();
|
tccgen: flex arrays etc.
Fixes potential writes past the allocated space with mostly
illegal flex array initializers. (60_errors_and_warnings.c
:test_var_array)
In exchange suspicious precautions such as section_reserve
or checks with sec->data_allocated were removed. (There is
an hard check 'init_assert()' for now but it's meant to be
just temporary)
Also, instead of filling holes, always memset(0) structures
& arrays on stack. Sometimes more efficient, sometimes isn't.
At least we can omit putting null initializers.
About array range inititializers: Reparsing tokens has a
small problem with sideeffects, for example
int c = 0, dd[] = { [0 ... 1] = ++c, [2 ... 3] = ++c };
Also, instead of 'squeeze_multi_relocs()', delete pre-existing
relocations in advance. This works even if secondary initializers
don't even have relocations, as with
[0 ... 7] = &stuff,
[4] = NULL
Also, in tcc.h: new macro "tcc_internal_error()"
2020-09-23 18:03:59 +08:00
|
|
|
else
|
2021-10-22 13:39:54 +08:00
|
|
|
init_putv(p, type, c);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* parse an initializer for type 't' if 'has_init' is non zero, and
   allocate space in local or global data space ('r' is either
   VT_LOCAL or VT_CONST). If 'v' is non zero, then an associated
   variable 'v' of scope 'scope' is declared before initializers
   are parsed. If 'v' is zero, then a reference to the new object
   is put in the value stack. If 'has_init' is 2, a special parsing
   is done to handle string constants. */
static void decl_initializer_alloc(CType *type, AttributeDef *ad, int r,
                                   int has_init, int v, int global)
{
    int size, align, addr;
    TokenString *init_str = NULL;

    Section *sec;
    Sym *flexible_array;
    Sym *sym;
    int saved_nocode_wanted = nocode_wanted;
#ifdef CONFIG_TCC_BCHECK
    int bcheck = tcc_state->do_bounds_check && !NODATA_WANTED;
#endif
    init_params p = {0};

    /* Always allocate static or global variables */
    if (v && (r & VT_VALMASK) == VT_CONST)
        nocode_wanted |= DATA_ONLY_WANTED;

    flexible_array = NULL;
    size = type_size(type, &align);

    /* exactly one flexible array may be initialized, either the
       toplevel array or the last member of the toplevel struct */

    if (size < 0) {
        /* If the base type itself was an array type of unspecified size
           (like in 'typedef int arr[]; arr x = {1};') then we will
           overwrite the unknown size by the real one for this decl.
           We need to unshare the ref symbol holding that size. */
        type->ref = sym_push(SYM_FIELD, &type->ref->type, 0, type->ref->c);
        p.flex_array_ref = type->ref;

    } else if (has_init && (type->t & VT_BTYPE) == VT_STRUCT) {
        /* struct with a flexible array member: find the last field and
           remember it so its size can be patched after parsing the init */
        Sym *field = type->ref->next;
        if (field) {
            while (field->next)
                field = field->next;
            if (field->type.t & VT_ARRAY && field->type.ref->c < 0) {
                flexible_array = field;
                p.flex_array_ref = field->type.ref;
                size = -1;  /* force the dry-run size pass below */
            }
        }
    }

    if (size < 0) {
        /* If unknown size, do a dry-run 1st pass */
        if (!has_init)
            tcc_error("unknown type size");
        if (has_init == 2) {
            /* only get strings */
            init_str = tok_str_alloc();
            while (tok == TOK_STR || tok == TOK_LSTR) {
                tok_str_add_tok(init_str);
                next();
            }
            tok_str_add(init_str, -1);
            tok_str_add(init_str, 0);
        } else
            skip_or_save_block(&init_str);
        unget_tok(0);

        /* compute size: replay the saved tokens with DIF_SIZE_ONLY so
           the array/flex-member sizes get filled in without generating
           any data or code */
        begin_macro(init_str, 1);
        next();
        decl_initializer(&p, type, 0, DIF_FIRST | DIF_SIZE_ONLY);
        /* prepare second initializer parsing */
        macro_ptr = init_str->str;
        next();

        /* if still unknown size, error */
        size = type_size(type, &align);
        if (size < 0)
            tcc_error("unknown type size");

        /* If there's a flex member and it was used in the initializer
           adjust size. */
        if (flexible_array && flexible_array->type.ref->c > 0)
            size += flexible_array->type.ref->c
                    * pointed_size(&flexible_array->type);
    }

    /* take into account specified alignment if bigger */
    if (ad->a.aligned) {
        int speca = 1 << (ad->a.aligned - 1);
        if (speca > align)
            align = speca;
    } else if (ad->a.packed) {
        align = 1;
    }

    if (!v && NODATA_WANTED)
        size = 0, align = 1;

    if ((r & VT_VALMASK) == VT_LOCAL) {
        /* stack (automatic) object */
        sec = NULL;
#ifdef CONFIG_TCC_BCHECK
        if (bcheck && v) {
            /* add padding between stack variables for bound checking */
            loc -= align;
        }
#endif
        loc = (loc - size) & -align;
        addr = loc;
        p.local_offset = addr + size;
#ifdef CONFIG_TCC_BCHECK
        if (bcheck && v) {
            /* add padding between stack variables for bound checking */
            loc -= align;
        }
#endif
        if (v) {
            /* local variable */
#ifdef CONFIG_TCC_ASM
            if (ad->asm_label) {
                int reg = asm_parse_regvar(ad->asm_label);
                if (reg >= 0)
                    r = (r & ~VT_VALMASK) | reg;
            }
#endif
            sym = sym_push(v, type, r, addr);
            if (ad->cleanup_func) {
                /* register an attribute((cleanup)) handler on the current
                   scope's cleanup list */
                Sym *cls = sym_push2(&all_cleanups,
                    SYM_FIELD | ++cur_scope->cl.n, 0, 0);
                cls->prev_tok = sym;
                cls->next = ad->cleanup_func;
                cls->ncl = cur_scope->cl.s;
                cur_scope->cl.s = cls;
            }

            sym->a = ad->a;
        } else {
            /* push local reference */
            vset(type, r, addr);
        }
    } else {
        /* static/global object */
        sym = NULL;
        if (v && global) {
            /* see if the symbol was already defined */
            sym = sym_find(v);
            if (sym) {
                if (p.flex_array_ref && (sym->type.t & type->t & VT_ARRAY)
                    && sym->type.ref->c > type->ref->c) {
                    /* flex array was already declared with explicit size
                            extern int arr[10];
                            int arr[] = { 1,2,3 }; */
                    type->ref->c = sym->type.ref->c;
                    size = type_size(type, &align);
                }
                patch_storage(sym, ad, type);
                /* we accept several definitions of the same global variable. */
                if (!has_init && sym->c && elfsym(sym)->st_shndx != SHN_UNDEF)
                    goto no_alloc;
            }
        }

        /* allocate symbol in corresponding section */
        sec = ad->section;
        if (!sec) {
            CType *tp = type;
            /* look through array-of-... to the element type to decide
               whether the object itself is const-qualified */
            while ((tp->t & (VT_BTYPE|VT_ARRAY)) == (VT_PTR|VT_ARRAY))
                tp = &tp->ref->type;
            if (tp->t & VT_CONSTANT) {
                sec = rodata_section;
            } else if (has_init) {
                sec = data_section;
                /*if (tcc_state->g_debug & 4)
                    tcc_warning("rw data: %s", get_tok_str(v, 0));*/
            } else if (tcc_state->nocommon)
                sec = bss_section;
        }

        if (sec) {
            addr = section_add(sec, size, align);
#ifdef CONFIG_TCC_BCHECK
            /* add padding if bound check */
            if (bcheck)
                section_add(sec, 1, 1);
#endif
        } else {
            addr = align; /* SHN_COMMON is special, symbol value is align */
            sec = common_section;
        }

        if (v) {
            if (!sym) {
                sym = sym_push(v, type, r | VT_SYM, 0);
                patch_storage(sym, ad, NULL);
            }
            /* update symbol definition */
            put_extern_sym(sym, sec, addr, size);
        } else {
            /* push global reference */
            vpush_ref(type, sec, addr, size);
            sym = vtop->sym;
            vtop->r |= r;
        }

#ifdef CONFIG_TCC_BCHECK
        /* handles bounds now because the symbol must be defined
           before for the relocation */
        if (bcheck) {
            addr_t *bounds_ptr;

            greloca(bounds_section, sym, bounds_section->data_offset, R_DATA_PTR, 0);
            /* then add global bound info */
            bounds_ptr = section_ptr_add(bounds_section, 2 * sizeof(addr_t));
            bounds_ptr[0] = 0; /* relocated */
            bounds_ptr[1] = size;
        }
#endif
    }

    if (type->t & VT_VLA) {
        int a;

        if (NODATA_WANTED)
            goto no_alloc;

        /* save before-VLA stack pointer if needed */
        if (cur_scope->vla.num == 0) {
            if (cur_scope->prev && cur_scope->prev->vla.num) {
                cur_scope->vla.locorig = cur_scope->prev->vla.loc;
            } else {
                gen_vla_sp_save(loc -= PTR_SIZE);
                cur_scope->vla.locorig = loc;
            }
        }

        vpush_type_size(type, &a);
        gen_vla_alloc(type, a);
#if defined TCC_TARGET_PE && defined TCC_TARGET_X86_64
        /* on _WIN64, because of the function args scratch area, the
           result of alloca differs from RSP and is returned in RAX. */
        gen_vla_result(addr), addr = (loc -= PTR_SIZE);
#endif
        gen_vla_sp_save(addr);
        cur_scope->vla.loc = addr;
        cur_scope->vla.num++;
    } else if (has_init) {
        p.sec = sec;
        decl_initializer(&p, type, addr, DIF_FIRST);
        /* patch flexible array member size back to -1, */
        /* for possible subsequent similar declarations */
        if (flexible_array)
            flexible_array->type.ref->c = -1;
    }

 no_alloc:
    /* restore parse state if needed */
    if (init_str) {
        end_macro();
        next();
    }

    nocode_wanted = saved_nocode_wanted;
}
|
|
|
|
|
2022-03-21 18:40:43 +08:00
|
|
|
/* generate vla code saved in post_type() */
static void func_vla_arg_code(Sym *arg)
{
    int align;
    TokenString *vla_array_tok = NULL;

    /* recurse into the referenced type first so inner dimensions are
       handled before this one */
    if (arg->type.ref)
        func_vla_arg_code(arg->type.ref);

    if ((arg->type.t & VT_VLA) && arg->type.ref->vla_array_str) {
        /* reserve an int-sized stack slot to hold the computed size */
        loc -= type_size(&int_type, &align);
        loc &= -align;
        arg->type.ref->c = loc;

        /* replay the saved size-expression tokens through the macro
           stream and evaluate them */
        unget_tok(0);
        vla_array_tok = tok_str_alloc();
        vla_array_tok->str = arg->type.ref->vla_array_str;
        begin_macro(vla_array_tok, 1);
        next();
        gexpr();
        end_macro();
        next();
        /* total size = element-count expression * element size; store it
           into the reserved stack slot */
        vpush_type_size(&arg->type.ref->type, &align);
        gen_op('*');
        vset(&int_type, VT_LOCAL|VT_LVAL, arg->type.ref->c);
        vswap();
        vstore();
        vpop();
    }
}
|
|
|
|
|
|
|
|
/* emit the deferred size-computation code for every VLA parameter of
   the function described by 'sym' */
static void func_vla_arg(Sym *sym)
{
    Sym *param = sym->type.ref->next;

    while (param) {
        if (param->type.t & VT_VLA)
            func_vla_arg_code(param);
        param = param->next;
    }
}
|
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* parse a function defined by symbol 'sym' and generate its code in
   'cur_text_section' */
static void gen_function(Sym *sym)
{
    /* fresh root scope for the function body */
    struct scope f = { 0 };
    cur_scope = root_scope = &f;
    nocode_wanted = 0;

    /* code is appended at the current end of the text section */
    ind = cur_text_section->data_offset;
    if (sym->a.aligned) {
        /* pad with nops up to the requested alignment boundary */
        size_t newoff = section_add(cur_text_section, 0,
                                    1 << (sym->a.aligned - 1));
        gen_fill_nops(newoff - ind);
    }

    /* NOTE: we patch the symbol size later */
    put_extern_sym(sym, cur_text_section, ind, 0);

    /* register constructors/destructors in their ELF arrays */
    if (sym->type.ref->f.func_ctor)
        add_array (tcc_state, ".init_array", sym->c);
    if (sym->type.ref->f.func_dtor)
        add_array (tcc_state, ".fini_array", sym->c);

    /* set up the per-function globals used while compiling the body */
    funcname = get_tok_str(sym->v, NULL);
    func_ind = ind;
    func_vt = sym->type.ref->type;
    func_var = sym->type.ref->f.func_type == FUNC_ELLIPSIS;

    /* put debug symbol */
    tcc_debug_funcstart(tcc_state, sym);

    /* push a dummy symbol to enable local sym storage */
    sym_push2(&local_stack, SYM_FIELD, 0, 0);
    local_scope = 1; /* for function parameters */
    gfunc_prolog(sym);
    tcc_debug_prolog_epilog(tcc_state, 0);
    local_scope = 0;
    rsym = 0;
    clear_temp_local_var_list();
    /* compute runtime sizes of VLA parameters (saved in post_type()) */
    func_vla_arg(sym);
    /* compile the function body */
    block(0);
    /* resolve pending jumps to the function's return point */
    gsym(rsym);
    nocode_wanted = 0;

    /* reset local stack */
    pop_local_syms(NULL, 0);
    tcc_debug_prolog_epilog(tcc_state, 1);
    gfunc_epilog();

    cur_text_section->data_offset = ind;
    local_scope = 0;
    label_pop(&global_label_stack, NULL, 0);
    sym_pop(&all_cleanups, NULL, 0);

    /* patch symbol size */
    elfsym(sym)->st_size = ind - func_ind;

    /* end of function */
    tcc_debug_funcend(tcc_state, ind - func_ind);

    /* It's better to crash than to generate wrong code */
    cur_text_section = NULL;
    funcname = ""; /* for safety */
    func_vt.t = VT_VOID; /* for safety */
    func_var = 0; /* for safety */
    ind = 0; /* for safety */
    func_ind = -1;
    nocode_wanted = DATA_ONLY_WANTED;
    check_vstack();

    /* do this after funcend debug info */
    next();
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* emit code for every referenced inline function; iterate to a fixed
   point because emitting one inline function may reference others */
static void gen_inline_functions(TCCState *s)
{
    int again, n;

    /* pseudo input file used while replaying saved token strings */
    tcc_open_bf(s, ":inline:", 0);
    do {
        again = 0;
        /* nb_inline_fns is re-read each pass: gen_function() may add
           more entries while it runs */
        for (n = 0; n < s->nb_inline_fns; ++n) {
            struct InlineFunc *ifn = s->inline_fns[n];
            Sym *fsym = ifn->sym;
            /* skip entries already emitted, or still unreferenced and
               purely internal */
            if (!fsym || (!fsym->c && (fsym->type.t & VT_INLINE)))
                continue;
            /* the function was used or forced (and then not internal):
               generate its code and convert it to a normal function */
            ifn->sym = NULL;
            tcc_debug_putfile(s, ifn->filename);
            begin_macro(ifn->func_str, 1);
            next();
            cur_text_section = text_section;
            gen_function(fsym);
            end_macro();
            again = 1;
        }
    } while (again);
    tcc_close();
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
/* release the saved token strings of inline functions that were never
   emitted, then drop the whole inline-function array */
static void free_inline_functions(TCCState *s)
{
    int n;

    /* free tokens of unused inline functions: a non-NULL sym means
       gen_inline_functions() never consumed this entry */
    for (n = 0; n < s->nb_inline_fns; ++n) {
        struct InlineFunc *ifn = s->inline_fns[n];
        if (ifn->sym)
            tok_str_free(ifn->func_str);
    }
    dynarray_reset(&s->inline_fns, &s->nb_inline_fns);
}
|
|
|
|
|
2022-12-10 08:12:44 +08:00
|
|
|
static void do_Static_assert(void){
|
|
|
|
CString error_str;
|
|
|
|
int c;
|
|
|
|
|
|
|
|
next();
|
|
|
|
skip('(');
|
|
|
|
c = expr_const();
|
|
|
|
|
|
|
|
if (tok == ')') {
|
|
|
|
if (!c)
|
|
|
|
tcc_error("_Static_assert fail");
|
|
|
|
next();
|
|
|
|
goto static_assert_out;
|
|
|
|
}
|
|
|
|
|
|
|
|
skip(',');
|
|
|
|
parse_mult_str(&error_str, "string constant");
|
|
|
|
if (c == 0)
|
|
|
|
tcc_error("%s", (char *)error_str.data);
|
|
|
|
cstr_free(&error_str);
|
|
|
|
skip(')');
|
|
|
|
static_assert_out:
|
|
|
|
skip(';');
|
|
|
|
}
|
|
|
|
|
2022-10-15 02:10:38 +08:00
|
|
|
/* 'l' is VT_LOCAL or VT_CONST to define default storage type
|
|
|
|
or VT_CMP if parsing old style parameter list
|
|
|
|
or VT_JMP if parsing c99 for decl: for (int i = 0, ...) */
|
|
|
|
static int decl(int l)
|
2009-05-06 02:18:10 +08:00
|
|
|
{
|
2020-10-24 03:38:53 +08:00
|
|
|
int v, has_init, r, oldint;
|
2009-05-06 02:18:10 +08:00
|
|
|
CType type, btype;
|
|
|
|
Sym *sym;
|
2019-01-28 08:21:38 +08:00
|
|
|
AttributeDef ad, adbase;
|
2012-10-26 01:40:04 +08:00
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
while (1) {
|
2022-12-10 08:12:44 +08:00
|
|
|
if (tok == TOK_STATIC_ASSERT) {
|
|
|
|
do_Static_assert();
|
|
|
|
continue;
|
|
|
|
}
|
2020-10-24 03:38:53 +08:00
|
|
|
|
|
|
|
oldint = 0;
|
2022-07-04 21:24:46 +08:00
|
|
|
if (!parse_btype(&btype, &adbase, l == VT_LOCAL)) {
|
2022-10-15 02:10:38 +08:00
|
|
|
if (l == VT_JMP)
|
2011-03-09 05:36:04 +08:00
|
|
|
return 0;
|
2017-03-11 09:13:59 +08:00
|
|
|
/* skip redundant ';' if not in old parameter decl scope */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok == ';' && l != VT_CMP) {
|
|
|
|
next();
|
2009-05-06 02:18:10 +08:00
|
|
|
continue;
|
|
|
|
}
|
various stuff
win32/Makefile ("for cygwin") removed
- On cygwin, the normal ./configure && make can be used with either
cygwin's "GCC for Win32 Toolchain"
./configure --cross-prefix=i686-w64-mingw32-
or with an existing tcc:
./configure --cc=<old-tccdir>/tcc.exe
tcctest.c:
- exclude test_high_clobbers() on _WIN64 (does not work)
tests2/95_bitfield.c:
- use 'signed char' for ARM (where default 'char' is unsigned)
tests:
- remove -I "expr" diff option to allow tests with
busybox-diff.
libtcc.c, tcc.c:
- removed -iwithprefix option. It is supposed to be
combined with -iprefix which we don't have either.
tccgen.c:
- fix assignments and return of 'void', as in
void f() {
void *p, *q;
*p = *q:
return *p;
}
This appears to be allowed but should do nothing.
tcc.h, libtcc.c, tccpp.c:
- Revert "Introduce VIP sysinclude paths which are always searched first"
This reverts commit 1d5e386b0a78393ac6b670c209a185849ec798a1.
The patch was giving tcc's system includes priority over -I which
is not how it should be.
tccelf.c:
- add DT_TEXTREL tag only if text relocations are actually
used (which is likely not the case on x86_64)
- prepare_dynamic_rel(): avoid relocation of unresolved
(weak) symbols
tccrun.c:
- for HAVE_SELINUX, use two mappings to the same (real) file.
(it was so once except the RX mapping wasn't used at all).
tccpe.c:
- fix relocation constant used for x86_64 (by Andrei E. Warentin)
- #ifndef _WIN32 do "chmod 755 ..." to get runnable exes on cygwin.
tccasm.c:
- keep forward asm labels static, otherwise they will endup
in dynsym eventually.
configure, Makefile:
- mingw32: respect ./configure options --bindir --docdir --libdir
- allow overriding tcc when building libtcc1.a and libtcc.def with
make XTCC=<tcc program to use>
- use $(wildcard ...) for install to allow installing just
a cross compiler for example
make cross-arm
make install
- use name <target>-libtcc1.a
build-tcc.bat:
- add options: -clean, -b bindir
2017-10-12 00:13:43 +08:00
|
|
|
if (l != VT_CONST)
|
|
|
|
break;
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok == TOK_ASM1 || tok == TOK_ASM2 || tok == TOK_ASM3) {
|
2009-05-06 02:18:10 +08:00
|
|
|
/* global asm block */
|
2021-10-22 13:39:54 +08:00
|
|
|
asm_global_instr();
|
2009-05-06 02:18:10 +08:00
|
|
|
continue;
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok >= TOK_UIDENT) {
|
various stuff
win32/Makefile ("for cygwin") removed
- On cygwin, the normal ./configure && make can be used with either
cygwin's "GCC for Win32 Toolchain"
./configure --cross-prefix=i686-w64-mingw32-
or with an existing tcc:
./configure --cc=<old-tccdir>/tcc.exe
tcctest.c:
- exclude test_high_clobbers() on _WIN64 (does not work)
tests2/95_bitfield.c:
- use 'signed char' for ARM (where default 'char' is unsigned)
tests:
- remove -I "expr" diff option to allow tests with
busybox-diff.
libtcc.c, tcc.c:
- removed -iwithprefix option. It is supposed to be
combined with -iprefix which we don't have either.
tccgen.c:
- fix assignments and return of 'void', as in
void f() {
void *p, *q;
*p = *q:
return *p;
}
This appears to be allowed but should do nothing.
tcc.h, libtcc.c, tccpp.c:
- Revert "Introduce VIP sysinclude paths which are always searched first"
This reverts commit 1d5e386b0a78393ac6b670c209a185849ec798a1.
The patch was giving tcc's system includes priority over -I which
is not how it should be.
tccelf.c:
- add DT_TEXTREL tag only if text relocations are actually
used (which is likely not the case on x86_64)
- prepare_dynamic_rel(): avoid relocation of unresolved
(weak) symbols
tccrun.c:
- for HAVE_SELINUX, use two mappings to the same (real) file.
(it was so once except the RX mapping wasn't used at all).
tccpe.c:
- fix relocation constant used for x86_64 (by Andrei E. Warentin)
- #ifndef _WIN32 do "chmod 755 ..." to get runnable exes on cygwin.
tccasm.c:
- keep forward asm labels static, otherwise they will endup
in dynsym eventually.
configure, Makefile:
- mingw32: respect ./configure options --bindir --docdir --libdir
- allow overriding tcc when building libtcc1.a and libtcc.def with
make XTCC=<tcc program to use>
- use $(wildcard ...) for install to allow installing just
a cross compiler for example
make cross-arm
make install
- use name <target>-libtcc1.a
build-tcc.bat:
- add options: -clean, -b bindir
2017-10-12 00:13:43 +08:00
|
|
|
/* special test for old K&R protos without explicit int
|
|
|
|
type. Only accepted when defining global data */
|
|
|
|
btype.t = VT_INT;
|
2020-10-24 03:38:53 +08:00
|
|
|
oldint = 1;
|
various stuff
win32/Makefile ("for cygwin") removed
- On cygwin, the normal ./configure && make can be used with either
cygwin's "GCC for Win32 Toolchain"
./configure --cross-prefix=i686-w64-mingw32-
or with an existing tcc:
./configure --cc=<old-tccdir>/tcc.exe
tcctest.c:
- exclude test_high_clobbers() on _WIN64 (does not work)
tests2/95_bitfield.c:
- use 'signed char' for ARM (where default 'char' is unsigned)
tests:
- remove -I "expr" diff option to allow tests with
busybox-diff.
libtcc.c, tcc.c:
- removed -iwithprefix option. It is supposed to be
combined with -iprefix which we don't have either.
tccgen.c:
- fix assignments and return of 'void', as in
void f() {
void *p, *q;
*p = *q:
return *p;
}
This appears to be allowed but should do nothing.
tcc.h, libtcc.c, tccpp.c:
- Revert "Introduce VIP sysinclude paths which are always searched first"
This reverts commit 1d5e386b0a78393ac6b670c209a185849ec798a1.
The patch was giving tcc's system includes priority over -I which
is not how it should be.
tccelf.c:
- add DT_TEXTREL tag only if text relocations are actually
used (which is likely not the case on x86_64)
- prepare_dynamic_rel(): avoid relocation of unresolved
(weak) symbols
tccrun.c:
- for HAVE_SELINUX, use two mappings to the same (real) file.
(it was so once except the RX mapping wasn't used at all).
tccpe.c:
- fix relocation constant used for x86_64 (by Andrei E. Warentin)
- #ifndef _WIN32 do "chmod 755 ..." to get runnable exes on cygwin.
tccasm.c:
- keep forward asm labels static, otherwise they will endup
in dynsym eventually.
configure, Makefile:
- mingw32: respect ./configure options --bindir --docdir --libdir
- allow overriding tcc when building libtcc1.a and libtcc.def with
make XTCC=<tcc program to use>
- use $(wildcard ...) for install to allow installing just
a cross compiler for example
make cross-arm
make install
- use name <target>-libtcc1.a
build-tcc.bat:
- add options: -clean, -b bindir
2017-10-12 00:13:43 +08:00
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok != TOK_EOF)
|
|
|
|
expect("declaration");
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
various stuff
win32/Makefile ("for cygwin") removed
- On cygwin, the normal ./configure && make can be used with either
cygwin's "GCC for Win32 Toolchain"
./configure --cross-prefix=i686-w64-mingw32-
or with an existing tcc:
./configure --cc=<old-tccdir>/tcc.exe
tcctest.c:
- exclude test_high_clobbers() on _WIN64 (does not work)
tests2/95_bitfield.c:
- use 'signed char' for ARM (where default 'char' is unsigned)
tests:
- remove -I "expr" diff option to allow tests with
busybox-diff.
libtcc.c, tcc.c:
- removed -iwithprefix option. It is supposed to be
combined with -iprefix which we don't have either.
tccgen.c:
- fix assignments and return of 'void', as in
void f() {
void *p, *q;
*p = *q:
return *p;
}
This appears to be allowed but should do nothing.
tcc.h, libtcc.c, tccpp.c:
- Revert "Introduce VIP sysinclude paths which are always searched first"
This reverts commit 1d5e386b0a78393ac6b670c209a185849ec798a1.
The patch was giving tcc's system includes priority over -I which
is not how it should be.
tccelf.c:
- add DT_TEXTREL tag only if text relocations are actually
used (which is likely not the case on x86_64)
- prepare_dynamic_rel(): avoid relocation of unresolved
(weak) symbols
tccrun.c:
- for HAVE_SELINUX, use two mappings to the same (real) file.
(it was so once except the RX mapping wasn't used at all).
tccpe.c:
- fix relocation constant used for x86_64 (by Andrei E. Warentin)
- #ifndef _WIN32 do "chmod 755 ..." to get runnable exes on cygwin.
tccasm.c:
- keep forward asm labels static, otherwise they will endup
in dynsym eventually.
configure, Makefile:
- mingw32: respect ./configure options --bindir --docdir --libdir
- allow overriding tcc when building libtcc1.a and libtcc.def with
make XTCC=<tcc program to use>
- use $(wildcard ...) for install to allow installing just
a cross compiler for example
make cross-arm
make install
- use name <target>-libtcc1.a
build-tcc.bat:
- add options: -clean, -b bindir
2017-10-12 00:13:43 +08:00
|
|
|
}
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2020-10-24 03:38:53 +08:00
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok == ';') {
|
2015-07-30 04:53:57 +08:00
|
|
|
if ((btype.t & VT_BTYPE) == VT_STRUCT) {
|
2020-10-24 03:38:53 +08:00
|
|
|
v = btype.ref->v;
|
2015-07-30 04:53:57 +08:00
|
|
|
if (!(v & SYM_FIELD) && (v & ~SYM_STRUCT) >= SYM_FIRST_ANOM)
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_warning("unnamed struct/union that defines no instances");
|
|
|
|
next();
|
2017-07-09 18:38:25 +08:00
|
|
|
continue;
|
2015-07-30 04:53:57 +08:00
|
|
|
}
|
2017-07-09 18:38:25 +08:00
|
|
|
if (IS_ENUM(btype.t)) {
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
2017-07-09 18:38:25 +08:00
|
|
|
continue;
|
|
|
|
}
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
2020-10-24 03:38:53 +08:00
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
while (1) { /* iterate thru each declaration */
|
|
|
|
type = btype;
|
2019-01-28 08:21:38 +08:00
|
|
|
ad = adbase;
|
2021-10-22 13:39:54 +08:00
|
|
|
type_decl(&type, &ad, &v, TYPE_DIRECT);
|
2009-05-06 02:18:10 +08:00
|
|
|
#if 0
|
|
|
|
{
|
|
|
|
char buf[500];
|
2021-10-22 13:39:54 +08:00
|
|
|
type_to_str(buf, sizeof(buf), &type, get_tok_str(v, NULL));
|
2009-05-06 02:18:10 +08:00
|
|
|
printf("type = '%s'\n", buf);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
if ((type.t & VT_BTYPE) == VT_FUNC) {
|
2022-10-15 02:10:38 +08:00
|
|
|
if ((type.t & VT_STATIC) && (l != VT_CONST))
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("function without file scope cannot be static");
|
2009-05-06 02:18:10 +08:00
|
|
|
/* if old style function prototype, we accept a
|
|
|
|
declaration list */
|
|
|
|
sym = type.ref;
|
2022-10-15 02:10:38 +08:00
|
|
|
if (sym->f.func_type == FUNC_OLD && l == VT_CONST) {
|
|
|
|
func_vt = type;
|
|
|
|
decl(VT_CMP);
|
|
|
|
}
|
2022-05-18 04:28:32 +08:00
|
|
|
#if defined TCC_TARGET_MACHO || defined TARGETOS_ANDROID
|
2020-05-22 11:17:02 +08:00
|
|
|
if (sym->f.func_alwinl
|
|
|
|
&& ((type.t & (VT_EXTERN | VT_INLINE))
|
|
|
|
== (VT_EXTERN | VT_INLINE))) {
|
|
|
|
/* always_inline functions must be handled as if they
|
|
|
|
don't generate multiple global defs, even if extern
|
|
|
|
inline, i.e. GNU inline semantics for those. Rewrite
|
|
|
|
them into static inline. */
|
|
|
|
type.t &= ~VT_EXTERN;
|
|
|
|
type.t |= VT_STATIC;
|
|
|
|
}
|
2020-06-21 08:03:46 +08:00
|
|
|
#endif
|
2019-06-22 10:00:52 +08:00
|
|
|
/* always compile 'extern inline' */
|
|
|
|
if (type.t & VT_EXTERN)
|
|
|
|
type.t &= ~VT_INLINE;
|
2020-10-24 03:38:53 +08:00
|
|
|
|
|
|
|
} else if (oldint) {
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_warning("type defaults to int");
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
|
2021-10-22 13:39:54 +08:00
|
|
|
if (gnu_ext && (tok == TOK_ASM1 || tok == TOK_ASM2 || tok == TOK_ASM3)) {
|
|
|
|
ad.asm_label = asm_label_instr();
|
2011-03-03 16:55:02 +08:00
|
|
|
/* parse one last attribute list, after asm label */
|
2021-10-22 13:39:54 +08:00
|
|
|
parse_attribute(&ad);
|
2019-06-22 10:00:52 +08:00
|
|
|
#if 0
|
|
|
|
/* gcc does not allow __asm__("label") with function definition,
|
|
|
|
but why not ... */
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok == '{')
|
|
|
|
expect(";");
|
2019-06-22 10:00:52 +08:00
|
|
|
#endif
|
2011-03-03 16:55:02 +08:00
|
|
|
}
|
|
|
|
|
2010-01-15 03:55:51 +08:00
|
|
|
#ifdef TCC_TARGET_PE
|
2017-07-09 18:34:11 +08:00
|
|
|
if (ad.a.dllimport || ad.a.dllexport) {
|
2019-06-29 09:42:25 +08:00
|
|
|
if (type.t & VT_STATIC)
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("cannot have dll linkage with static");
|
2019-06-29 09:42:25 +08:00
|
|
|
if (type.t & VT_TYPEDEF) {
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_warning("'%s' attribute ignored for typedef",
|
2019-06-29 09:42:25 +08:00
|
|
|
ad.a.dllimport ? (ad.a.dllimport = 0, "dllimport") :
|
|
|
|
(ad.a.dllexport = 0, "dllexport"));
|
|
|
|
} else if (ad.a.dllimport) {
|
2017-07-09 18:34:11 +08:00
|
|
|
if ((type.t & VT_BTYPE) == VT_FUNC)
|
|
|
|
ad.a.dllimport = 0;
|
|
|
|
else
|
|
|
|
type.t |= VT_EXTERN;
|
|
|
|
}
|
2017-04-04 14:34:52 +08:00
|
|
|
}
|
2010-01-15 03:55:51 +08:00
|
|
|
#endif
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok == '{') {
|
2017-03-11 09:13:59 +08:00
|
|
|
if (l != VT_CONST)
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("cannot use local functions");
|
2009-05-06 02:18:10 +08:00
|
|
|
if ((type.t & VT_BTYPE) != VT_FUNC)
|
2021-10-22 13:39:54 +08:00
|
|
|
expect("function definition");
|
2009-05-06 02:18:10 +08:00
|
|
|
|
2017-03-11 09:13:59 +08:00
|
|
|
/* reject abstract declarators in function definition
|
2019-06-11 21:28:42 +08:00
|
|
|
make old style params without decl have int type */
|
2009-05-06 02:18:10 +08:00
|
|
|
sym = type.ref;
|
2017-03-11 09:13:59 +08:00
|
|
|
while ((sym = sym->next) != NULL) {
|
2009-05-06 02:18:10 +08:00
|
|
|
if (!(sym->v & ~SYM_FIELD))
|
2021-10-22 13:39:54 +08:00
|
|
|
expect("identifier");
|
2019-06-11 21:28:42 +08:00
|
|
|
if (sym->type.t == VT_VOID)
|
2021-10-22 13:39:54 +08:00
|
|
|
sym->type = int_type;
|
2019-06-11 21:28:42 +08:00
|
|
|
}
|
2017-02-14 01:23:43 +08:00
|
|
|
|
2020-05-13 17:14:53 +08:00
|
|
|
/* apply post-declaraton attributes */
|
|
|
|
merge_funcattr(&type.ref->f, &ad.f);
|
|
|
|
|
2017-12-04 03:43:48 +08:00
|
|
|
/* put function symbol */
|
2019-06-22 10:00:52 +08:00
|
|
|
type.t &= ~VT_EXTERN;
|
2021-10-22 13:39:54 +08:00
|
|
|
sym = external_sym(v, &type, 0, &ad);
|
2020-05-13 17:14:53 +08:00
|
|
|
|
2009-05-06 02:18:10 +08:00
|
|
|
/* static inline functions are just recorded as a kind
|
|
|
|
of macro. Their code will be emitted at the end of
|
|
|
|
the compilation unit only if they are used */
|
2019-06-22 10:00:52 +08:00
|
|
|
if (sym->type.t & VT_INLINE) {
|
2009-06-30 03:14:53 +08:00
|
|
|
struct InlineFunc *fn;
|
2021-10-22 13:39:54 +08:00
|
|
|
fn = tcc_malloc(sizeof *fn + strlen(file->filename));
|
|
|
|
strcpy(fn->filename, file->filename);
|
tccpp: fix issues, add tests
* fix some macro expansion issues
* add some pp tests in tests/pp
* improved tcc -E output for better diff'ability
* remove -dD feature (quirky code, exotic feature,
didn't work well)
Based partially on ideas / researches from PipCet
Some issues remain with VA_ARGS macros (if used in a
rather tricky way).
Also, to keep it simple, the pp doesn't automtically
add any extra spaces to separate tokens which otherwise
would form wrong tokens if re-read from tcc -E output
(such as '+' '=') GCC does that, other compilers don't.
* cleanups
- #line 01 "file" / # 01 "file" processing
- #pragma comment(lib,"foo")
- tcc -E: forward some pragmas to output (pack, comment(lib))
- fix macro parameter list parsing mess from
a3fc54345949535524d01319e1ca6378b7c2c201
a715d7143d9d17da17e67fec6af1c01409a71a31
(some coffee might help, next time ;)
- introduce TOK_PPSTR - to have character constants as
written in the file (similar to TOK_PPNUM)
- allow '\' appear in macros
- new functions begin/end_macro to:
- fix switching macro levels during expansion
- allow unget_tok to unget more than one tok
- slight speedup by using bitflags in isidnum_table
Also:
- x86_64.c : fix decl after statements
- i386-gen,c : fix a vstack leak with VLA on windows
- configure/Makefile : build on windows (MSYS) was broken
- tcc_warning: fflush stderr to keep output order (win32)
2015-05-09 20:29:39 +08:00
|
|
|
fn->sym = sym;
|
2021-10-22 13:39:54 +08:00
|
|
|
skip_or_save_block(&fn->func_str);
|
|
|
|
dynarray_add(&tcc_state->inline_fns,
|
|
|
|
&tcc_state->nb_inline_fns, fn);
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
|
|
|
/* compute text section */
|
|
|
|
cur_text_section = ad.section;
|
|
|
|
if (!cur_text_section)
|
|
|
|
cur_text_section = text_section;
|
2021-10-22 13:39:54 +08:00
|
|
|
gen_function(sym);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
} else {
|
2017-03-11 09:13:59 +08:00
|
|
|
if (l == VT_CMP) {
|
|
|
|
/* find parameter in function parameter list */
|
2022-10-15 02:10:38 +08:00
|
|
|
for (sym = func_vt.ref->next; sym; sym = sym->next)
|
2017-03-11 09:13:59 +08:00
|
|
|
if ((sym->v & ~SYM_FIELD) == v)
|
|
|
|
goto found;
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("declaration for parameter '%s' but no such parameter",
|
|
|
|
get_tok_str(v, NULL));
|
2022-10-15 02:10:38 +08:00
|
|
|
found:
|
2017-03-11 09:13:59 +08:00
|
|
|
if (type.t & VT_STORAGE) /* 'register' is okay */
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("storage class specified for '%s'",
|
|
|
|
get_tok_str(v, NULL));
|
2017-03-11 09:13:59 +08:00
|
|
|
if (sym->type.t != VT_VOID)
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("redefinition of parameter '%s'",
|
|
|
|
get_tok_str(v, NULL));
|
|
|
|
convert_parameter_type(&type);
|
2017-03-11 09:13:59 +08:00
|
|
|
sym->type = type;
|
|
|
|
} else if (type.t & VT_TYPEDEF) {
|
2009-05-06 02:18:10 +08:00
|
|
|
/* save typedefed type */
|
|
|
|
/* XXX: test storage specifiers ? */
|
2021-10-22 13:39:54 +08:00
|
|
|
sym = sym_find(v);
|
|
|
|
if (sym && sym->sym_scope == local_scope) {
|
2016-05-06 14:32:54 +08:00
|
|
|
if (!is_compatible_types(&sym->type, &type)
|
|
|
|
|| !(sym->type.t & VT_TYPEDEF))
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("incompatible redefinition of '%s'",
|
|
|
|
get_tok_str(v, NULL));
|
2016-05-06 14:32:54 +08:00
|
|
|
sym->type = type;
|
|
|
|
} else {
|
2021-10-22 13:39:54 +08:00
|
|
|
sym = sym_push(v, &type, 0, 0);
|
2016-05-06 14:32:54 +08:00
|
|
|
}
|
2014-01-07 21:57:07 +08:00
|
|
|
sym->a = ad.a;
|
2023-02-23 00:59:31 +08:00
|
|
|
if ((type.t & VT_BTYPE) == VT_FUNC)
|
|
|
|
merge_funcattr(&sym->type.ref->f, &ad.f);
|
2021-10-22 13:39:54 +08:00
|
|
|
if (debug_modes)
|
|
|
|
tcc_debug_typedef (tcc_state, sym);
|
2018-08-04 04:39:00 +08:00
|
|
|
} else if ((type.t & VT_BTYPE) == VT_VOID
|
|
|
|
&& !(type.t & VT_EXTERN)) {
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("declaration of void object");
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
|
|
|
r = 0;
|
2011-03-03 17:07:36 +08:00
|
|
|
if ((type.t & VT_BTYPE) == VT_FUNC) {
|
|
|
|
/* external function definition */
|
|
|
|
/* specific case for func_call attribute */
|
2023-02-23 00:59:31 +08:00
|
|
|
merge_funcattr(&type.ref->f, &ad.f);
|
2011-03-03 17:07:36 +08:00
|
|
|
} else if (!(type.t & VT_ARRAY)) {
|
|
|
|
/* not lvalue if array */
|
2019-12-17 01:48:31 +08:00
|
|
|
r |= VT_LVAL;
|
2011-03-03 17:07:36 +08:00
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
has_init = (tok == '=');
|
2011-04-07 00:17:03 +08:00
|
|
|
if (has_init && (type.t & VT_VLA))
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("variable length array cannot be initialized");
|
2019-06-22 10:00:52 +08:00
|
|
|
if (((type.t & VT_EXTERN) && (!has_init || l != VT_CONST))
|
|
|
|
|| (type.t & VT_BTYPE) == VT_FUNC
|
|
|
|
/* as with GCC, uninitialized global arrays with no size
|
|
|
|
are considered extern: */
|
|
|
|
|| ((type.t & VT_ARRAY) && !has_init
|
|
|
|
&& l == VT_CONST && type.ref->c < 0)
|
|
|
|
) {
|
2011-03-03 17:07:36 +08:00
|
|
|
/* external variable or function */
|
2019-06-22 10:00:52 +08:00
|
|
|
type.t |= VT_EXTERN;
|
2021-10-22 13:39:54 +08:00
|
|
|
sym = external_sym(v, &type, r, &ad);
|
Reinstate attribute alias handling
commit 2a0167a merged alias and asm symbol renaming, but broke
semantics of aliases, see testcase. Basically the difference between
the two is that an asm rename doesn't generate a new symbol, i.e. with
int foo __asm__("bar");
all source reference to 'foo' will be to 'bar', nothing of the name
'foo' will remain in the object file, and for instance reference to
'foo' from other compilation units won't be resolved to this one.
Aliases OTOH create an additional symbol. With:
void target (void) { return; }
void afunc (void) __attribute__((alias("target")));
reference to 'afunc' will remain 'afunc' in the object file. It will
generate two symbols, 'afunc' and 'target' referring to the same entity.
This difference matters if other compilation units make references to
'afunc'.
A side requirement of this is that for alias to work that the target
symbol needs to be defined in the same unit. For TCC we even require a
stricter variant: it must be defined before the alias is created.
Now, with this I merely re-instated the old flow of events before above
commit. It didn't seem useful anymore to place both names in the
asm_label member of attributes, and the asm_label member of Sym now
again only needs the hold the __asm__ rename.
It also follows that tcc_predefs.h can't make use of attribute alias to
e.g. map __builtin_memcpy to __bound_memcpy (simply because the latter
isn't defined in all units), but rather must use __asm__ renaming, which
in turn means that the underscore handling needs to be done by hand.
2020-09-30 23:46:01 +08:00
|
|
|
if (ad.alias_target) {
|
|
|
|
/* Aliases need to be emitted when their target
|
|
|
|
symbol is emitted, even if perhaps unreferenced.
|
|
|
|
We only support the case where the base is
|
|
|
|
already defined, otherwise we would need
|
|
|
|
deferring to emit the aliases until the end of
|
|
|
|
the compile unit. */
|
2021-10-22 13:39:54 +08:00
|
|
|
Sym *alias_target = sym_find(ad.alias_target);
|
|
|
|
ElfSym *esym = elfsym(alias_target);
|
Reinstate attribute alias handling
commit 2a0167a merged alias and asm symbol renaming, but broke
semantics of aliases, see testcase. Basically the difference between
the two is that an asm rename doesn't generate a new symbol, i.e. with
int foo __asm__("bar");
all source reference to 'foo' will be to 'bar', nothing of the name
'foo' will remain in the object file, and for instance reference to
'foo' from other compilation units won't be resolved to this one.
Aliases OTOH create an additional symbol. With:
void target (void) { return; }
void afunc (void) __attribute__((alias("target")));
reference to 'afunc' will remain 'afunc' in the object file. It will
generate two symbols, 'afunc' and 'target' referring to the same entity.
This difference matters if other compilation units make references to
'afunc'.
A side requirement of this is that for alias to work that the target
symbol needs to be defined in the same unit. For TCC we even require a
stricter variant: it must be defined before the alias is created.
Now, with this I merely re-instated the old flow of events before above
commit. It didn't seem useful anymore to place both names in the
asm_label member of attributes, and the asm_label member of Sym now
again only needs the hold the __asm__ rename.
It also follows that tcc_predefs.h can't make use of attribute alias to
e.g. map __builtin_memcpy to __bound_memcpy (simply because the latter
isn't defined in all units), but rather must use __asm__ renaming, which
in turn means that the underscore handling needs to be done by hand.
2020-09-30 23:46:01 +08:00
|
|
|
if (!esym)
|
2021-10-22 13:39:54 +08:00
|
|
|
tcc_error("unsupported forward __alias__ attribute");
|
|
|
|
put_extern_sym2(sym, esym->st_shndx,
|
2020-10-01 23:52:16 +08:00
|
|
|
esym->st_value, esym->st_size, 1);
|
Reinstate attribute alias handling
commit 2a0167a merged alias and asm symbol renaming, but broke
semantics of aliases, see testcase. Basically the difference between
the two is that an asm rename doesn't generate a new symbol, i.e. with
int foo __asm__("bar");
all source reference to 'foo' will be to 'bar', nothing of the name
'foo' will remain in the object file, and for instance reference to
'foo' from other compilation units won't be resolved to this one.
Aliases OTOH create an additional symbol. With:
void target (void) { return; }
void afunc (void) __attribute__((alias("target")));
reference to 'afunc' will remain 'afunc' in the object file. It will
generate two symbols, 'afunc' and 'target' referring to the same entity.
This difference matters if other compilation units make references to
'afunc'.
A side requirement of this is that, for an alias to work, the target
symbol needs to be defined in the same unit.  For TCC we even require a
stricter variant: it must be defined before the alias is created.
Now, with this I merely re-instated the old flow of events before above
commit. It didn't seem useful anymore to place both names in the
asm_label member of attributes, and the asm_label member of Sym now
again only needs to hold the __asm__ rename.
It also follows that tcc_predefs.h can't make use of attribute alias to
e.g. map __builtin_memcpy to __bound_memcpy (simply because the latter
isn't defined in all units), but rather must use __asm__ renaming, which
in turn means that the underscore handling needs to be done by hand.
2020-09-30 23:46:01 +08:00
|
|
|
}
|
2009-05-06 02:18:10 +08:00
|
|
|
} else {
|
2022-10-15 02:10:38 +08:00
|
|
|
if (l == VT_CONST || (type.t & VT_STATIC))
|
2009-05-06 02:18:10 +08:00
|
|
|
r |= VT_CONST;
|
|
|
|
else
|
2022-10-15 02:10:38 +08:00
|
|
|
r |= VT_LOCAL;
|
2009-05-06 02:18:10 +08:00
|
|
|
if (has_init)
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
2019-06-22 10:00:52 +08:00
|
|
|
else if (l == VT_CONST)
|
2017-12-04 03:43:48 +08:00
|
|
|
/* uninitialized global variables may be overridden */
|
|
|
|
type.t |= VT_EXTERN;
|
2022-10-15 02:10:38 +08:00
|
|
|
decl_initializer_alloc(&type, &ad, r, has_init, v, l == VT_CONST);
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
if (tok != ',') {
|
2022-10-15 02:10:38 +08:00
|
|
|
if (l == VT_JMP)
|
2011-03-09 05:36:04 +08:00
|
|
|
return 1;
|
2021-10-22 13:39:54 +08:00
|
|
|
skip(';');
|
2009-05-06 02:18:10 +08:00
|
|
|
break;
|
|
|
|
}
|
2021-10-22 13:39:54 +08:00
|
|
|
next();
|
2009-05-06 02:18:10 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2011-03-09 05:36:04 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-10-15 21:55:31 +08:00
|
|
|
/* ------------------------------------------------------------------------- */
|
2019-04-29 19:53:07 +08:00
|
|
|
#undef gjmp_addr
|
|
|
|
#undef gjmp
|
|
|
|
/* ------------------------------------------------------------------------- */
|