Skip to content

Commit cc6dc5f

Browse files
committed
tools/nolibc: merge i386 and x86_64 into a single x86 arch
This remained the only exception to the kernel's architectures organization and it's always a bit cumbersome to deal with. Let's merge i386 and x86_64 into x86. This will result in a single arch-x86.h file by default, and we'll no longer need to merge the two manually during installation. Requesting either i386 or x86_64 will also result in installing x86. Acked-by: Thomas Weißschuh <linux@weissschuh.net> Signed-off-by: Willy Tarreau <w@1wt.eu>
1 parent a477629 commit cc6dc5f

4 files changed

Lines changed: 179 additions & 195 deletions

File tree

tools/include/nolibc/Makefile

Lines changed: 2 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -91,18 +91,12 @@ help:
9191
@echo " OUTPUT = $(OUTPUT)"
9292
@echo ""
9393

94-
# Note: when ARCH is "x86" we concatenate both x86_64 and i386
9594
headers:
9695
$(Q)mkdir -p $(OUTPUT)sysroot
9796
$(Q)mkdir -p $(OUTPUT)sysroot/include
9897
$(Q)cp --parents $(all_files) $(OUTPUT)sysroot/include/
99-
$(Q)if [ "$(ARCH)" = "x86" ]; then \
100-
sed -e \
101-
's,^#ifndef _NOLIBC_ARCH_X86_64_H,#if !defined(_NOLIBC_ARCH_X86_64_H) \&\& defined(__x86_64__),' \
102-
arch-x86_64.h; \
103-
sed -e \
104-
's,^#ifndef _NOLIBC_ARCH_I386_H,#if !defined(_NOLIBC_ARCH_I386_H) \&\& !defined(__x86_64__),' \
105-
arch-i386.h; \
98+
$(Q)if [ "$(ARCH)" = "i386" -o "$(ARCH)" = "x86_64" ]; then \
99+
cat arch-x86.h; \
106100
elif [ -e "$(arch_file)" ]; then \
107101
cat $(arch_file); \
108102
else \

tools/include/nolibc/arch-i386.h

Lines changed: 0 additions & 178 deletions
This file was deleted.

tools/include/nolibc/arch-x86.h (apparently renamed from arch-x86_64.h — the diff below rewrites its header guard from _NOLIBC_ARCH_X86_64_H to _NOLIBC_ARCH_X86_H; the original filename header was lost in extraction)

Lines changed: 175 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,184 @@
11
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
22
/*
3-
* x86_64 specific definitions for NOLIBC
4-
* Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
3+
* x86 specific definitions for NOLIBC (both 32- and 64-bit)
4+
* Copyright (C) 2017-2025 Willy Tarreau <w@1wt.eu>
55
*/
66

7-
#ifndef _NOLIBC_ARCH_X86_64_H
8-
#define _NOLIBC_ARCH_X86_64_H
7+
#ifndef _NOLIBC_ARCH_X86_H
8+
#define _NOLIBC_ARCH_X86_H
99

1010
#include "compiler.h"
1111
#include "crt.h"
1212

13+
#if !defined(__x86_64__)
14+
15+
/* Syscalls for i386 :
16+
* - mostly similar to x86_64
17+
* - registers are 32-bit
18+
* - syscall number is passed in eax
19+
* - arguments are in ebx, ecx, edx, esi, edi, ebp respectively
20+
* - all registers are preserved (except eax of course)
21+
* - the system call is performed by calling int $0x80
22+
* - syscall return comes in eax
23+
* - the arguments are cast to long and assigned into the target registers
24+
* which are then simply passed as registers to the asm code, so that we
25+
* don't have to experience issues with register constraints.
26+
* - the syscall number is always specified last in order to allow to force
27+
* some registers before (gcc refuses a %-register at the last position).
28+
*
29+
* Also, i386 supports the old_select syscall if newselect is not available
30+
*/
31+
#define __ARCH_WANT_SYS_OLD_SELECT
32+
33+
#define my_syscall0(num) \
34+
({ \
35+
long _ret; \
36+
register long _num __asm__ ("eax") = (num); \
37+
\
38+
__asm__ volatile ( \
39+
"int $0x80\n" \
40+
: "=a" (_ret) \
41+
: "0"(_num) \
42+
: "memory", "cc" \
43+
); \
44+
_ret; \
45+
})
46+
47+
#define my_syscall1(num, arg1) \
48+
({ \
49+
long _ret; \
50+
register long _num __asm__ ("eax") = (num); \
51+
register long _arg1 __asm__ ("ebx") = (long)(arg1); \
52+
\
53+
__asm__ volatile ( \
54+
"int $0x80\n" \
55+
: "=a" (_ret) \
56+
: "r"(_arg1), \
57+
"0"(_num) \
58+
: "memory", "cc" \
59+
); \
60+
_ret; \
61+
})
62+
63+
#define my_syscall2(num, arg1, arg2) \
64+
({ \
65+
long _ret; \
66+
register long _num __asm__ ("eax") = (num); \
67+
register long _arg1 __asm__ ("ebx") = (long)(arg1); \
68+
register long _arg2 __asm__ ("ecx") = (long)(arg2); \
69+
\
70+
__asm__ volatile ( \
71+
"int $0x80\n" \
72+
: "=a" (_ret) \
73+
: "r"(_arg1), "r"(_arg2), \
74+
"0"(_num) \
75+
: "memory", "cc" \
76+
); \
77+
_ret; \
78+
})
79+
80+
#define my_syscall3(num, arg1, arg2, arg3) \
81+
({ \
82+
long _ret; \
83+
register long _num __asm__ ("eax") = (num); \
84+
register long _arg1 __asm__ ("ebx") = (long)(arg1); \
85+
register long _arg2 __asm__ ("ecx") = (long)(arg2); \
86+
register long _arg3 __asm__ ("edx") = (long)(arg3); \
87+
\
88+
__asm__ volatile ( \
89+
"int $0x80\n" \
90+
: "=a" (_ret) \
91+
: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
92+
"0"(_num) \
93+
: "memory", "cc" \
94+
); \
95+
_ret; \
96+
})
97+
98+
#define my_syscall4(num, arg1, arg2, arg3, arg4) \
99+
({ \
100+
long _ret; \
101+
register long _num __asm__ ("eax") = (num); \
102+
register long _arg1 __asm__ ("ebx") = (long)(arg1); \
103+
register long _arg2 __asm__ ("ecx") = (long)(arg2); \
104+
register long _arg3 __asm__ ("edx") = (long)(arg3); \
105+
register long _arg4 __asm__ ("esi") = (long)(arg4); \
106+
\
107+
__asm__ volatile ( \
108+
"int $0x80\n" \
109+
: "=a" (_ret) \
110+
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
111+
"0"(_num) \
112+
: "memory", "cc" \
113+
); \
114+
_ret; \
115+
})
116+
117+
#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
118+
({ \
119+
long _ret; \
120+
register long _num __asm__ ("eax") = (num); \
121+
register long _arg1 __asm__ ("ebx") = (long)(arg1); \
122+
register long _arg2 __asm__ ("ecx") = (long)(arg2); \
123+
register long _arg3 __asm__ ("edx") = (long)(arg3); \
124+
register long _arg4 __asm__ ("esi") = (long)(arg4); \
125+
register long _arg5 __asm__ ("edi") = (long)(arg5); \
126+
\
127+
__asm__ volatile ( \
128+
"int $0x80\n" \
129+
: "=a" (_ret) \
130+
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
131+
"0"(_num) \
132+
: "memory", "cc" \
133+
); \
134+
_ret; \
135+
})
136+
137+
#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
138+
({ \
139+
long _eax = (long)(num); \
140+
long _arg6 = (long)(arg6); /* Always in memory */ \
141+
__asm__ volatile ( \
142+
"pushl %[_arg6]\n\t" \
143+
"pushl %%ebp\n\t" \
144+
"movl 4(%%esp),%%ebp\n\t" \
145+
"int $0x80\n\t" \
146+
"popl %%ebp\n\t" \
147+
"addl $4,%%esp\n\t" \
148+
: "+a"(_eax) /* %eax */ \
149+
: "b"(arg1), /* %ebx */ \
150+
"c"(arg2), /* %ecx */ \
151+
"d"(arg3), /* %edx */ \
152+
"S"(arg4), /* %esi */ \
153+
"D"(arg5), /* %edi */ \
154+
[_arg6]"m"(_arg6) /* memory */ \
155+
: "memory", "cc" \
156+
); \
157+
_eax; \
158+
})
159+
160+
/* startup code */
161+
/*
162+
* i386 System V ABI mandates:
163+
* 1) last pushed argument must be 16-byte aligned.
164+
* 2) The deepest stack frame should be set to zero
165+
*
166+
*/
167+
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
168+
{
169+
__asm__ volatile (
170+
"xor %ebp, %ebp\n" /* zero the stack frame */
171+
"mov %esp, %eax\n" /* save stack pointer to %eax, as arg1 of _start_c */
172+
"sub $12, %esp\n" /* sub 12 to keep it aligned after the push %eax */
173+
"push %eax\n" /* push arg1 on stack to support plain stack modes too */
174+
"call _start_c\n" /* transfer to c runtime */
175+
"hlt\n" /* ensure it does not return */
176+
);
177+
__nolibc_entrypoint_epilogue();
178+
}
179+
180+
#else /* !defined(__x86_64__) */
181+
13182
/* Syscalls for x86_64 :
14183
* - registers are 64-bit
15184
* - syscall number is passed in rax
@@ -214,4 +383,5 @@ __asm__ (
214383
"retq\n"
215384
);
216385

217-
#endif /* _NOLIBC_ARCH_X86_64_H */
386+
#endif /* !defined(__x86_64__) */
387+
#endif /* _NOLIBC_ARCH_X86_H */

0 commit comments

Comments (0)