path: root/arch/mips64
author     Rich Felker <dalias@aerifal.cx>    2019-07-16 20:49:02 -0400
committer  Rich Felker <dalias@aerifal.cx>    2019-07-16 20:49:02 -0400
commit     ddc7c4f936c7a90781072f10dbaa122007e939d0 (patch)
tree       289319113bf6ce143f97eb0fbbc08f46305b8618 /arch/mips64
parent     db2a148d9df3d7d1f3423313761f0e2517c1aa2b (diff)
download   musl-ddc7c4f936c7a90781072f10dbaa122007e939d0.tar.gz
clean up mips64/n32 syscall asm constraints
ever since inline syscalls were added for (o32) mips in commit 328810d32524e4928fec50b57e37e1bf330b2e40, the asm has nonsensically loaded the syscall number itself, rather than taking $2 as an input constraint and letting the compiler load it. commit cfc09b1ecf0c6981494fd73dffe234416f66af10 improved on this somewhat by allowing a constant syscall number to propagate into an immediate, but missed that the whole operation made no sense.

now, only $4, $5, $6, $8, and $9 are potential input-only registers. $2 is always both input and output, and $7 is both input and output when it carries an argument, otherwise output-only. previously, $7 was treated as an input (with a "1" constraint matching its output position) even when it was not an input, which was arguably undefined behavior (an asm input read from an indeterminate value). this is corrected.
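the essence of the change, for the one-argument case, is sketched below. this is a simplified illustration rather than the actual musl source: the function names my_syscall1_old/my_syscall1_new are hypothetical, and CLOBBERLIST approximates musl's mips64 SYSCALL_CLOBBERLIST; the constraint lines themselves mirror the diff.

/* simplified sketch, not the actual musl source: function names are
 * hypothetical, and CLOBBERLIST approximates musl's mips64
 * SYSCALL_CLOBBERLIST. */
#define CLOBBERLIST \
	"$1", "$3", "$10", "$11", "$12", "$13", "$14", "$15", \
	"$24", "$25", "hi", "lo", "memory"

/* old form: $2 and $7 are declared but never initialized in C; the asm
 * loads the syscall number itself ("daddu $2,$0,%2"), and both $2 and
 * $7 are fed back in as inputs via the matching "0"/"1" constraints
 * while still indeterminate -- arguably undefined behavior. */
static inline long my_syscall1_old(long n, long a)
{
	register long r4 __asm__("$4") = a;
	register long r7 __asm__("$7");
	register long r2 __asm__("$2");
	__asm__ __volatile__ (
		"daddu $2,$0,%2 ; syscall"
		: "=&r"(r2), "=r"(r7)
		: "ir"(n), "0"(r2), "1"(r7), "r"(r4)
		: CLOBBERLIST);
	return r7 ? -r2 : r2;
}

/* new form: the compiler loads n into $2, which "+&r" marks as both
 * input and output; $7 is output-only here because it carries no
 * argument, so no indeterminate value is ever read. */
static inline long my_syscall1_new(long n, long a)
{
	register long r4 __asm__("$4") = a;
	register long r7 __asm__("$7");
	register long r2 __asm__("$2") = n;
	__asm__ __volatile__ (
		"syscall"
		: "+&r"(r2), "=r"(r7)
		: "r"(r4)
		: CLOBBERLIST);
	return r7 ? -r2 : r2;
}

note in the diff below that for __syscall4 through __syscall6, where the fourth argument lives in $7, the new code uses "+r"(r7) instead of "=r"(r7), so $7 is an input-output operand exactly when it is a real input.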
Diffstat (limited to 'arch/mips64')
-rw-r--r--  arch/mips64/syscall_arch.h  55
1 file changed, 28 insertions, 27 deletions
diff --git a/arch/mips64/syscall_arch.h b/arch/mips64/syscall_arch.h
index a653c31c..1e720c7d 100644
--- a/arch/mips64/syscall_arch.h
+++ b/arch/mips64/syscall_arch.h
@@ -53,10 +53,11 @@ static void __stat_fix(struct kernel_stat *kst, struct stat *st)
static inline long __syscall0(long n)
{
register long r7 __asm__("$7");
- register long r2 __asm__("$2");
+ register long r2 __asm__("$2") = n;
__asm__ __volatile__ (
- "daddu $2,$0,%2 ; syscall"
- : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7)
+ "syscall"
+ : "+&r"(r2), "=r"(r7)
+ :
: SYSCALL_CLOBBERLIST);
return r7 ? -r2 : r2;
}
@@ -65,11 +66,11 @@ static inline long __syscall1(long n, long a)
{
register long r4 __asm__("$4") = a;
register long r7 __asm__("$7");
- register long r2 __asm__("$2");
+ register long r2 __asm__("$2") = n;
__asm__ __volatile__ (
- "daddu $2,$0,%2 ; syscall"
- : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7),
- "r"(r4)
+ "syscall"
+ : "+&r"(r2), "=r"(r7)
+ : "r"(r4)
: SYSCALL_CLOBBERLIST);
return r7 ? -r2 : r2;
}
@@ -81,15 +82,15 @@ static inline long __syscall2(long n, long a, long b)
register long r4 __asm__("$4") = a;
register long r5 __asm__("$5") = b;
register long r7 __asm__("$7");
- register long r2 __asm__("$2");
+ register long r2 __asm__("$2") = n;
if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
r5 = (long) &kst;
__asm__ __volatile__ (
- "daddu $2,$0,%2 ; syscall"
- : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7),
- "r"(r4), "r"(r5)
+ "syscall"
+ : "+&r"(r2), "=r"(r7)
+ : "r"(r4), "r"(r5)
: SYSCALL_CLOBBERLIST);
if (r7) return -r2;
@@ -109,15 +110,15 @@ static inline long __syscall3(long n, long a, long b, long c)
register long r5 __asm__("$5") = b;
register long r6 __asm__("$6") = c;
register long r7 __asm__("$7");
- register long r2 __asm__("$2");
+ register long r2 __asm__("$2") = n;
if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
r5 = (long) &kst;
__asm__ __volatile__ (
- "daddu $2,$0,%2 ; syscall"
- : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7),
- "r"(r4), "r"(r5), "r"(r6)
+ "syscall"
+ : "+&r"(r2), "=r"(r7)
+ : "r"(r4), "r"(r5), "r"(r6)
: SYSCALL_CLOBBERLIST);
if (r7) return -r2;
@@ -137,7 +138,7 @@ static inline long __syscall4(long n, long a, long b, long c, long d)
register long r5 __asm__("$5") = b;
register long r6 __asm__("$6") = c;
register long r7 __asm__("$7") = d;
- register long r2 __asm__("$2");
+ register long r2 __asm__("$2") = n;
if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
r5 = (long) &kst;
@@ -145,9 +146,9 @@ static inline long __syscall4(long n, long a, long b, long c, long d)
r6 = (long) &kst;
__asm__ __volatile__ (
- "daddu $2,$0,%2 ; syscall"
- : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7),
- "r"(r4), "r"(r5), "r"(r6)
+ "syscall"
+ : "+&r"(r2), "+r"(r7)
+ : "r"(r4), "r"(r5), "r"(r6)
: SYSCALL_CLOBBERLIST);
if (r7) return -r2;
@@ -170,7 +171,7 @@ static inline long __syscall5(long n, long a, long b, long c, long d, long e)
register long r6 __asm__("$6") = c;
register long r7 __asm__("$7") = d;
register long r8 __asm__("$8") = e;
- register long r2 __asm__("$2");
+ register long r2 __asm__("$2") = n;
if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
r5 = (long) &kst;
@@ -178,9 +179,9 @@ static inline long __syscall5(long n, long a, long b, long c, long d, long e)
r6 = (long) &kst;
__asm__ __volatile__ (
- "daddu $2,$0,%2 ; syscall"
- : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7),
- "r"(r4), "r"(r5), "r"(r6), "r"(r8)
+ "syscall"
+ : "+&r"(r2), "+r"(r7)
+ : "r"(r4), "r"(r5), "r"(r6), "r"(r8)
: SYSCALL_CLOBBERLIST);
if (r7) return -r2;
@@ -204,7 +205,7 @@ static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
register long r7 __asm__("$7") = d;
register long r8 __asm__("$8") = e;
register long r9 __asm__("$9") = f;
- register long r2 __asm__("$2");
+ register long r2 __asm__("$2") = n;
if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
r5 = (long) &kst;
@@ -212,9 +213,9 @@ static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
r6 = (long) &kst;
__asm__ __volatile__ (
- "daddu $2,$0,%2 ; syscall"
- : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7),
- "r"(r4), "r"(r5), "r"(r6), "r"(r8), "r"(r9)
+ "syscall"
+ : "+&r"(r2), "+r"(r7)
+ : "r"(r4), "r"(r5), "r"(r6), "r"(r8), "r"(r9)
: SYSCALL_CLOBBERLIST);
if (r7) return -r2;