|
1 | 1 | /* SPDX-License-Identifier: LGPL-2.1 OR MIT */ |
2 | 2 | /* |
3 | | - * x86_64 specific definitions for NOLIBC |
4 | | - * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu> |
| 3 | + * x86 specific definitions for NOLIBC (both 32- and 64-bit) |
| 4 | + * Copyright (C) 2017-2025 Willy Tarreau <w@1wt.eu> |
5 | 5 | */ |
6 | 6 |
|
7 | | -#ifndef _NOLIBC_ARCH_X86_64_H |
8 | | -#define _NOLIBC_ARCH_X86_64_H |
| 7 | +#ifndef _NOLIBC_ARCH_X86_H |
| 8 | +#define _NOLIBC_ARCH_X86_H |
9 | 9 |
|
10 | 10 | #include "compiler.h" |
11 | 11 | #include "crt.h" |
12 | 12 |
|
| 13 | +#if !defined(__x86_64__) |
| 14 | + |
| 15 | +/* Syscalls for i386 : |
| 16 | + * - mostly similar to x86_64 |
| 17 | + * - registers are 32-bit |
| 18 | + * - syscall number is passed in eax |
| 19 | + * - arguments are in ebx, ecx, edx, esi, edi, ebp respectively |
| 20 | + * - all registers are preserved (except eax of course) |
| 21 | + * - the system call is performed by calling int $0x80 |
| 22 | + * - syscall return comes in eax |
| 23 | + * - the arguments are cast to long and assigned into the target registers |
| 24 | + * which are then simply passed as registers to the asm code, so that we |
| 25 | + * don't have to experience issues with register constraints. |
| 26 | + * - the syscall number is always specified last in order to allow to force |
| 27 | + * some registers before (gcc refuses a %-register at the last position). |
| 28 | + * |
| 29 | + * Also, i386 supports the old_select syscall if newselect is not available |
| 30 | + */ |
| 31 | +#define __ARCH_WANT_SYS_OLD_SELECT |
| 32 | + |
/*
 * 0-argument syscall: the syscall number goes in %eax via "0"(_num) (tied to
 * the "=a" output), int $0x80 traps into the kernel, and the return value
 * comes back in %eax. "memory" and "cc" are clobbered because the kernel may
 * read/write user memory and the trap does not preserve flags.
 */
#define my_syscall0(num)                                                      \
({                                                                            \
	long _ret;                                                            \
	register long _num __asm__ ("eax") = (num);                           \
	                                                                      \
	__asm__ volatile (                                                    \
		"int $0x80\n"                                                 \
		: "=a" (_ret)                                                 \
		: "0"(_num)                                                   \
		: "memory", "cc"                                              \
	);                                                                    \
	_ret;                                                                 \
})
| 46 | + |
/*
 * 1-argument syscall: number in %eax, arg1 in %ebx. The argument is cast to
 * long and pinned to its register with a register-asm variable, then passed
 * with a plain "r" constraint so gcc keeps it where we put it.
 */
#define my_syscall1(num, arg1)                                                \
({                                                                            \
	long _ret;                                                            \
	register long _num __asm__ ("eax") = (num);                           \
	register long _arg1 __asm__ ("ebx") = (long)(arg1);                   \
	                                                                      \
	__asm__ volatile (                                                    \
		"int $0x80\n"                                                 \
		: "=a" (_ret)                                                 \
		: "r"(_arg1),                                                 \
		  "0"(_num)                                                   \
		: "memory", "cc"                                              \
	);                                                                    \
	_ret;                                                                 \
})
| 62 | + |
/*
 * 2-argument syscall: number in %eax, args in %ebx, %ecx. The syscall number
 * is listed last so gcc accepts the fixed-register inputs before it (see the
 * convention comment at the top of the i386 section).
 */
#define my_syscall2(num, arg1, arg2)                                          \
({                                                                            \
	long _ret;                                                            \
	register long _num __asm__ ("eax") = (num);                           \
	register long _arg1 __asm__ ("ebx") = (long)(arg1);                   \
	register long _arg2 __asm__ ("ecx") = (long)(arg2);                   \
	                                                                      \
	__asm__ volatile (                                                    \
		"int $0x80\n"                                                 \
		: "=a" (_ret)                                                 \
		: "r"(_arg1), "r"(_arg2),                                     \
		  "0"(_num)                                                   \
		: "memory", "cc"                                              \
	);                                                                    \
	_ret;                                                                 \
})
| 79 | + |
/*
 * 3-argument syscall: number in %eax, args in %ebx, %ecx, %edx; result in
 * %eax. All argument registers are preserved by the kernel, so only %eax,
 * memory and flags are listed as outputs/clobbers.
 */
#define my_syscall3(num, arg1, arg2, arg3)                                    \
({                                                                            \
	long _ret;                                                            \
	register long _num __asm__ ("eax") = (num);                           \
	register long _arg1 __asm__ ("ebx") = (long)(arg1);                   \
	register long _arg2 __asm__ ("ecx") = (long)(arg2);                   \
	register long _arg3 __asm__ ("edx") = (long)(arg3);                   \
	                                                                      \
	__asm__ volatile (                                                    \
		"int $0x80\n"                                                 \
		: "=a" (_ret)                                                 \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3),                         \
		  "0"(_num)                                                   \
		: "memory", "cc"                                              \
	);                                                                    \
	_ret;                                                                 \
})
| 97 | + |
/*
 * 4-argument syscall: number in %eax, args in %ebx, %ecx, %edx, %esi;
 * result in %eax.
 */
#define my_syscall4(num, arg1, arg2, arg3, arg4)                              \
({                                                                            \
	long _ret;                                                            \
	register long _num __asm__ ("eax") = (num);                           \
	register long _arg1 __asm__ ("ebx") = (long)(arg1);                   \
	register long _arg2 __asm__ ("ecx") = (long)(arg2);                   \
	register long _arg3 __asm__ ("edx") = (long)(arg3);                   \
	register long _arg4 __asm__ ("esi") = (long)(arg4);                   \
	                                                                      \
	__asm__ volatile (                                                    \
		"int $0x80\n"                                                 \
		: "=a" (_ret)                                                 \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4),             \
		  "0"(_num)                                                   \
		: "memory", "cc"                                              \
	);                                                                    \
	_ret;                                                                 \
})
| 116 | + |
/*
 * 5-argument syscall: number in %eax, args in %ebx, %ecx, %edx, %esi, %edi;
 * result in %eax. This is the largest variant that fits entirely in
 * constraint-pinned registers; the 6-argument form below needs %ebp and is
 * handled differently.
 */
#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5)                        \
({                                                                            \
	long _ret;                                                            \
	register long _num __asm__ ("eax") = (num);                           \
	register long _arg1 __asm__ ("ebx") = (long)(arg1);                   \
	register long _arg2 __asm__ ("ecx") = (long)(arg2);                   \
	register long _arg3 __asm__ ("edx") = (long)(arg3);                   \
	register long _arg4 __asm__ ("esi") = (long)(arg4);                   \
	register long _arg5 __asm__ ("edi") = (long)(arg5);                   \
	                                                                      \
	__asm__ volatile (                                                    \
		"int $0x80\n"                                                 \
		: "=a" (_ret)                                                 \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "0"(_num)                                                   \
		: "memory", "cc"                                              \
	);                                                                    \
	_ret;                                                                 \
})
| 136 | + |
/*
 * 6-argument syscall: the sixth argument lives in %ebp, which cannot be
 * named in a constraint when gcc uses it as the frame pointer. So _arg6 is
 * forced into memory and the asm moves it into %ebp by hand: push _arg6,
 * save the caller's %ebp, load %ebp from the pushed copy (at 4(%esp) after
 * the second push), trap, then restore %ebp and drop the temporary slot.
 * The other five arguments use gcc's single-register constraint letters
 * (b/c/d/S/D), and "+a" carries the number in and the result out of %eax.
 */
#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6)                  \
({                                                                            \
	long _eax = (long)(num);                                              \
	long _arg6 = (long)(arg6); /* Always in memory */                     \
	__asm__ volatile (                                                    \
		"pushl	%[_arg6]\n\t"                                         \
		"pushl	%%ebp\n\t"                                            \
		"movl	4(%%esp),%%ebp\n\t"                                   \
		"int	$0x80\n\t"                                            \
		"popl	%%ebp\n\t"                                            \
		"addl	$4,%%esp\n\t"                                         \
		: "+a"(_eax)		/* %eax */                            \
		: "b"(arg1),		/* %ebx */                            \
		  "c"(arg2),		/* %ecx */                            \
		  "d"(arg3),		/* %edx */                            \
		  "S"(arg4),		/* %esi */                            \
		  "D"(arg5),		/* %edi */                            \
		  [_arg6]"m"(_arg6)	/* memory */                          \
		: "memory", "cc"                                              \
	);                                                                    \
	_eax;                                                                 \
})
| 159 | + |
/* startup code */
/*
 * i386 System V ABI mandates:
 * 1) last pushed argument must be 16-byte aligned.
 * 2) The deepest stack frame should be set to zero
 *
 * The function is weak so a program may provide its own _start, noreturn
 * because it hands off to _start_c and never comes back, and built without
 * a stack protector since no runtime is set up yet at this point.
 */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
	__asm__ volatile (
		"xor  %ebp, %ebp\n"       /* zero the stack frame                                */
		"mov  %esp, %eax\n"       /* save stack pointer to %eax, as arg1 of _start_c     */
		"sub  $12, %esp\n"        /* sub 12 to keep it aligned after the push %eax       */
		"push %eax\n"             /* push arg1 on stack to support plain stack modes too */
		"call _start_c\n"         /* transfer to c runtime                               */
		"hlt\n"                   /* ensure it does not return                           */
	);
	__nolibc_entrypoint_epilogue();
}
| 179 | + |
| 180 | +#else /* !defined(__x86_64__) */ |
| 181 | + |
13 | 182 | /* Syscalls for x86_64 : |
14 | 183 | * - registers are 64-bit |
15 | 184 | * - syscall number is passed in rax |
@@ -214,4 +383,5 @@ __asm__ ( |
214 | 383 | "retq\n" |
215 | 384 | ); |
216 | 385 |
|
217 | | -#endif /* _NOLIBC_ARCH_X86_64_H */ |
| 386 | +#endif /* !defined(__x86_64__) */ |
| 387 | +#endif /* _NOLIBC_ARCH_X86_H */ |
0 commit comments