-rw-r--r--   arch/powerpc/syscall_arch.h             | 14
-rw-r--r--   arch/powerpc64/syscall_arch.h           | 14
-rw-r--r--   include/netinet/in.h                    |  3
-rw-r--r--   src/setjmp/riscv32/longjmp.S            | 30
-rw-r--r--   src/setjmp/riscv32/setjmp.S             | 30
-rw-r--r--   src/setjmp/riscv64/longjmp.S            | 30
-rw-r--r--   src/setjmp/riscv64/setjmp.S             | 30
-rw-r--r--   src/thread/aarch64/__set_thread_area.c  | 22
8 files changed, 110 insertions(+), 63 deletions(-)
diff --git a/arch/powerpc/syscall_arch.h b/arch/powerpc/syscall_arch.h
index 54c885cb..fe893af4 100644
--- a/arch/powerpc/syscall_arch.h
+++ b/arch/powerpc/syscall_arch.h
@@ -9,7 +9,7 @@ static inline long __syscall0(long n)
 	register long r3 __asm__("r3");
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "=r"(r3)
-	:: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
@@ -19,7 +19,7 @@ static inline long __syscall1(long n, long a)
 	register long r3 __asm__("r3") = a;
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "+r"(r3)
-	:: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
@@ -30,7 +30,7 @@ static inline long __syscall2(long n, long a, long b)
 	register long r4 __asm__("r4") = b;
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "+r"(r3), "+r"(r4)
-	:: "memory", "cr0", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
@@ -42,7 +42,7 @@ static inline long __syscall3(long n, long a, long b, long c)
 	register long r5 __asm__("r5") = c;
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5)
-	:: "memory", "cr0", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
@@ -55,7 +55,7 @@ static inline long __syscall4(long n, long a, long b, long c, long d)
 	register long r6 __asm__("r6") = d;
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6)
-	:: "memory", "cr0", "r7", "r8", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
@@ -69,7 +69,7 @@ static inline long __syscall5(long n, long a, long b, long c, long d, long e)
 	register long r7 __asm__("r7") = e;
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6), "+r"(r7)
-	:: "memory", "cr0", "r8", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
@@ -84,7 +84,7 @@ static inline long __syscall6(long n, long a, long b, long c, long d, long e, lo
 	register long r8 __asm__("r8") = f;
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6), "+r"(r7), "+r"(r8)
-	:: "memory", "cr0", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
diff --git a/arch/powerpc64/syscall_arch.h b/arch/powerpc64/syscall_arch.h
index 7d34fbe4..4c5d3ae9 100644
--- a/arch/powerpc64/syscall_arch.h
+++ b/arch/powerpc64/syscall_arch.h
@@ -7,7 +7,7 @@ static inline long __syscall0(long n)
 	register long r3 __asm__("r3");
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "=r"(r3)
-	:: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
@@ -17,7 +17,7 @@ static inline long __syscall1(long n, long a)
 	register long r3 __asm__("r3") = a;
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "+r"(r3)
-	:: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
@@ -28,7 +28,7 @@ static inline long __syscall2(long n, long a, long b)
 	register long r4 __asm__("r4") = b;
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "+r"(r3), "+r"(r4)
-	:: "memory", "cr0", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
@@ -40,7 +40,7 @@ static inline long __syscall3(long n, long a, long b, long c)
 	register long r5 __asm__("r5") = c;
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5)
-	:: "memory", "cr0", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
@@ -53,7 +53,7 @@ static inline long __syscall4(long n, long a, long b, long c, long d)
 	register long r6 __asm__("r6") = d;
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6)
-	:: "memory", "cr0", "r7", "r8", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
@@ -67,7 +67,7 @@ static inline long __syscall5(long n, long a, long b, long c, long d, long e)
 	register long r7 __asm__("r7") = e;
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6), "+r"(r7)
-	:: "memory", "cr0", "r8", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r8", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
@@ -82,7 +82,7 @@ static inline long __syscall6(long n, long a, long b, long c, long d, long e, lo
 	register long r8 __asm__("r8") = f;
 	__asm__ __volatile__("sc ; bns+ 1f ; neg %1, %1 ; 1:"
 	: "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5), "+r"(r6), "+r"(r7), "+r"(r8)
-	:: "memory", "cr0", "r9", "r10", "r11", "r12");
+	:: "memory", "cr0", "r9", "r10", "r11", "r12", "ctr", "xer");
 	return r3;
 }
 
diff --git a/include/netinet/in.h b/include/netinet/in.h
index fb628b61..60bbaa75 100644
--- a/include/netinet/in.h
+++ b/include/netinet/in.h
@@ -132,7 +132,8 @@ uint16_t ntohs(uint16_t);
 
 #define IN6_IS_ADDR_V4COMPAT(a) \
 	(((uint32_t *) (a))[0] == 0 && ((uint32_t *) (a))[1] == 0 && \
-	((uint32_t *) (a))[2] == 0 && ((uint8_t *) (a))[15] > 1)
+	((uint32_t *) (a))[2] == 0 && \
+	!IN6_IS_ADDR_UNSPECIFIED(a) && !IN6_IS_ADDR_LOOPBACK(a))
 
 #define IN6_IS_ADDR_MC_NODELOCAL(a) \
 	(IN6_IS_ADDR_MULTICAST(a) && ((((uint8_t *) (a))[1] & 0xf) == 0x1))
diff --git a/src/setjmp/riscv32/longjmp.S b/src/setjmp/riscv32/longjmp.S
index f9cb3318..b4e5458d 100644
--- a/src/setjmp/riscv32/longjmp.S
+++ b/src/setjmp/riscv32/longjmp.S
@@ -23,18 +23,24 @@ longjmp:
 	lw ra, 52(a0)
 
 #ifndef __riscv_float_abi_soft
-	fld fs0, 56(a0)
-	fld fs1, 64(a0)
-	fld fs2, 72(a0)
-	fld fs3, 80(a0)
-	fld fs4, 88(a0)
-	fld fs5, 96(a0)
-	fld fs6, 104(a0)
-	fld fs7, 112(a0)
-	fld fs8, 120(a0)
-	fld fs9, 128(a0)
-	fld fs10, 136(a0)
-	fld fs11, 144(a0)
+#ifdef __riscv_float_abi_double
+#define FLX fld
+#else
+#define FLX flw
+#endif
+
+	FLX fs0, 56(a0)
+	FLX fs1, 64(a0)
+	FLX fs2, 72(a0)
+	FLX fs3, 80(a0)
+	FLX fs4, 88(a0)
+	FLX fs5, 96(a0)
+	FLX fs6, 104(a0)
+	FLX fs7, 112(a0)
+	FLX fs8, 120(a0)
+	FLX fs9, 128(a0)
+	FLX fs10, 136(a0)
+	FLX fs11, 144(a0)
 #endif
 
 	seqz a0, a1
diff --git a/src/setjmp/riscv32/setjmp.S b/src/setjmp/riscv32/setjmp.S
index 8a75cf55..5a1a41ef 100644
--- a/src/setjmp/riscv32/setjmp.S
+++ b/src/setjmp/riscv32/setjmp.S
@@ -23,18 +23,24 @@ setjmp:
 	sw ra, 52(a0)
 
 #ifndef __riscv_float_abi_soft
-	fsd fs0, 56(a0)
-	fsd fs1, 64(a0)
-	fsd fs2, 72(a0)
-	fsd fs3, 80(a0)
-	fsd fs4, 88(a0)
-	fsd fs5, 96(a0)
-	fsd fs6, 104(a0)
-	fsd fs7, 112(a0)
-	fsd fs8, 120(a0)
-	fsd fs9, 128(a0)
-	fsd fs10, 136(a0)
-	fsd fs11, 144(a0)
+#ifdef __riscv_float_abi_double
+#define FSX fsd
+#else
+#define FSX fsw
+#endif
+
+	FSX fs0, 56(a0)
+	FSX fs1, 64(a0)
+	FSX fs2, 72(a0)
+	FSX fs3, 80(a0)
+	FSX fs4, 88(a0)
+	FSX fs5, 96(a0)
+	FSX fs6, 104(a0)
+	FSX fs7, 112(a0)
+	FSX fs8, 120(a0)
+	FSX fs9, 128(a0)
+	FSX fs10, 136(a0)
+	FSX fs11, 144(a0)
 #endif
 
 	li a0, 0
diff --git a/src/setjmp/riscv64/longjmp.S b/src/setjmp/riscv64/longjmp.S
index 41e2d210..982475c7 100644
--- a/src/setjmp/riscv64/longjmp.S
+++ b/src/setjmp/riscv64/longjmp.S
@@ -23,18 +23,24 @@ longjmp:
 	ld ra, 104(a0)
 
 #ifndef __riscv_float_abi_soft
-	fld fs0, 112(a0)
-	fld fs1, 120(a0)
-	fld fs2, 128(a0)
-	fld fs3, 136(a0)
-	fld fs4, 144(a0)
-	fld fs5, 152(a0)
-	fld fs6, 160(a0)
-	fld fs7, 168(a0)
-	fld fs8, 176(a0)
-	fld fs9, 184(a0)
-	fld fs10, 192(a0)
-	fld fs11, 200(a0)
+#ifdef __riscv_float_abi_double
+#define FLX fld
+#else
+#define FLX flw
+#endif
+
+	FLX fs0, 112(a0)
+	FLX fs1, 120(a0)
+	FLX fs2, 128(a0)
+	FLX fs3, 136(a0)
+	FLX fs4, 144(a0)
+	FLX fs5, 152(a0)
+	FLX fs6, 160(a0)
+	FLX fs7, 168(a0)
+	FLX fs8, 176(a0)
+	FLX fs9, 184(a0)
+	FLX fs10, 192(a0)
+	FLX fs11, 200(a0)
 #endif
 
 	seqz a0, a1
diff --git a/src/setjmp/riscv64/setjmp.S b/src/setjmp/riscv64/setjmp.S
index 51249672..0795bf7d 100644
--- a/src/setjmp/riscv64/setjmp.S
+++ b/src/setjmp/riscv64/setjmp.S
@@ -23,18 +23,24 @@ setjmp:
 	sd ra, 104(a0)
 
 #ifndef __riscv_float_abi_soft
-	fsd fs0, 112(a0)
-	fsd fs1, 120(a0)
-	fsd fs2, 128(a0)
-	fsd fs3, 136(a0)
-	fsd fs4, 144(a0)
-	fsd fs5, 152(a0)
-	fsd fs6, 160(a0)
-	fsd fs7, 168(a0)
-	fsd fs8, 176(a0)
-	fsd fs9, 184(a0)
-	fsd fs10, 192(a0)
-	fsd fs11, 200(a0)
+#ifdef __riscv_float_abi_double
+#define FSX fsd
+#else
+#define FSX fsw
+#endif
+
+	FSX fs0, 112(a0)
+	FSX fs1, 120(a0)
+	FSX fs2, 128(a0)
+	FSX fs3, 136(a0)
+	FSX fs4, 144(a0)
+	FSX fs5, 152(a0)
+	FSX fs6, 160(a0)
+	FSX fs7, 168(a0)
+	FSX fs8, 176(a0)
+	FSX fs9, 184(a0)
+	FSX fs10, 192(a0)
+	FSX fs11, 200(a0)
 #endif
 
 	li a0, 0
diff --git a/src/thread/aarch64/__set_thread_area.c b/src/thread/aarch64/__set_thread_area.c
index a348ee77..2ec788e8 100644
--- a/src/thread/aarch64/__set_thread_area.c
+++ b/src/thread/aarch64/__set_thread_area.c
@@ -1,5 +1,27 @@
+#include <elf.h>
+#include "libc.h"
+
+#define BITRANGE(a,b) (2*(1UL<<(b))-(1UL<<(a)))
+
 int __set_thread_area(void *p)
 {
 	__asm__ __volatile__ ("msr tpidr_el0,%0" : : "r"(p) : "memory");
+
+	/* Mask off hwcap bits for SME and unknown future features. This is
+	 * necessary because SME is not safe to use without libc support for
+	 * it, and we do not (yet) have such support. */
+	for (size_t *v = libc.auxv; *v; v+=2) {
+		if (v[0]==AT_HWCAP) {
+			v[1] &= ~BITRANGE(42,63); /* 42-47 are SME */
+		} else if (v[0]==AT_HWCAP2) {
+			v[1] &= ~(BITRANGE(23,30)
+				| BITRANGE(37,42)
+				| BITRANGE(57,62));
+		} else if (v[0]==AT_HWCAP3 || v[0]==AT_HWCAP4) {
+			v[0] = AT_IGNORE;
+			v[1] = 0;
+		}
+	}
+
 	return 0;
 }
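
Note on the include/netinet/in.h hunk: the old IN6_IS_ADDR_V4COMPAT test required the final address byte to be greater than 1, which rejected v4-compatible addresses ending in .0 or .1, while the new form excludes only the unspecified (::) and loopback (::1) addresses. The standalone check below is a sketch of my own, not part of the commit, and its output depends on which definition the installed <netinet/in.h> provides.

/* Sketch: probe IN6_IS_ADDR_V4COMPAT on a few addresses.
 * Not part of the commit; results reflect the installed header. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static void check(const char *s)
{
	struct in6_addr a;
	if (inet_pton(AF_INET6, s, &a) != 1) return;
	printf("%-14s v4compat=%d\n", s, IN6_IS_ADDR_V4COMPAT(&a));
}

int main(void)
{
	check("::");          /* unspecified: excluded by old and new test */
	check("::1");         /* loopback: excluded by old and new test */
	check("::10.0.0.1");  /* last byte 1: rejected by old test, accepted now */
	check("::192.0.2.7"); /* accepted by both */
	return 0;
}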
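Note on the aarch64 hunk: BITRANGE(a,b) builds a mask of bits a through b inclusive by relying on unsigned wraparound; for b == 63 the term 2*(1UL<<63) wraps to zero, leaving all bits from a upward set. A minimal standalone sketch of my own (assuming the 64-bit unsigned long of aarch64, not part of the commit) that prints the masks applied to AT_HWCAP and AT_HWCAP2:

/* Sketch: show the hwcap masks produced by the BITRANGE macro.
 * Assumes 64-bit unsigned long, as on aarch64. */
#include <stdio.h>

#define BITRANGE(a,b) (2*(1UL<<(b))-(1UL<<(a)))

int main(void)
{
	/* bits 42..63: 2*(1UL<<63) wraps to 0, so the result is -(1UL<<42) */
	printf("AT_HWCAP mask:  %#018lx\n", BITRANGE(42,63));
	/* bits 23..30, 37..42 and 57..62 */
	printf("AT_HWCAP2 mask: %#018lx\n",
	       BITRANGE(23,30) | BITRANGE(37,42) | BITRANGE(57,62));
	return 0;
}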