author     Rich Felker <dalias@aerifal.cx>  2019-04-10 19:51:47 -0400
committer  Rich Felker <dalias@aerifal.cx>  2019-04-10 19:56:08 -0400
commit     1bcdaeee6e659f1d856717c9aa562a068f2f3bd4 (patch)
tree       532c627fa3b0c76225741ee31a7266c38496f888
parent     d3b4869cd3a1861f824b9a97c3078846748764f0 (diff)
download   musl-1bcdaeee6e659f1d856717c9aa562a068f2f3bd4.tar.gz
implement inline 5- and 6-argument syscalls for mipsn32 and mips64
n32 and n64 ABIs add new argument registers vs o32, so passing arguments on the stack is not necessary; it's not clear why the 5- and 6-argument versions were special-cased to begin with, and it seems to have been pattern-copying from arch/mips (o32).

i've treated the new argument registers like the first 4 in terms of clobber status (non-clobbered). hopefully this is correct.
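For readers who want the pattern in isolation: the sketch below mirrors what the mips64 hunk adds for the 5-argument case, with the stat-translation logic omitted. The name my_syscall5 is made up for illustration, and the code builds only with a mips64 compiler targeting the n64 ABI; the register bindings, constraints, and clobber list are taken directly from the diff.

/* Hypothetical standalone sketch: inlined 5-argument syscall for the
 * n64 ABI, mirroring the mips64 hunk below (stat fixup omitted).
 * Arguments ride in $4..$8, the syscall number is materialized in $2,
 * the result comes back in $2, and $7 is nonzero on error. */
static inline long my_syscall5(long n, long a, long b, long c, long d, long e)
{
	register long r4 __asm__("$4") = a;
	register long r5 __asm__("$5") = b;
	register long r6 __asm__("$6") = c;
	register long r7 __asm__("$7") = d;
	register long r8 __asm__("$8") = e;
	register long r2 __asm__("$2");
	__asm__ __volatile__ (
		"daddu $2,$0,%2 ; syscall"
		: "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7),
		  "r"(r4), "r"(r5), "r"(r6), "r"(r8)
		: "$1", "$3", "$9", "$10", "$11", "$12", "$13",
		  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
	return r7 ? -r2 : r2;	/* errors come back as negated errno */
}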
-rw-r--r--  arch/mips64/syscall_arch.h   66
-rw-r--r--  arch/mipsn32/syscall_arch.h  31
2 files changed, 68 insertions, 29 deletions
diff --git a/arch/mips64/syscall_arch.h b/arch/mips64/syscall_arch.h
index 28d0f934..99eebc32 100644
--- a/arch/mips64/syscall_arch.h
+++ b/arch/mips64/syscall_arch.h
@@ -1,9 +1,6 @@
#define __SYSCALL_LL_E(x) (x)
#define __SYSCALL_LL_O(x) (x)
-__attribute__((visibility("hidden")))
-long (__syscall)(long, ...);
-
#define SYSCALL_RLIM_INFINITY (-1UL/2)
#include <sys/stat.h>
@@ -167,48 +164,71 @@ static inline long __syscall4(long n, long a, long b, long c, long d)
static inline long __syscall5(long n, long a, long b, long c, long d, long e)
{
- long r2;
- long old_b = b;
- long old_c = c;
struct kernel_stat kst;
+ long ret;
+ register long r4 __asm__("$4") = a;
+ register long r5 __asm__("$5") = b;
+ register long r6 __asm__("$6") = c;
+ register long r7 __asm__("$7") = d;
+ register long r8 __asm__("$8") = e;
+ register long r2 __asm__("$2");
if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
- b = (long) &kst;
+ r5 = (long) &kst;
if (n == SYS_newfstatat)
- c = (long) &kst;
+ r6 = (long) &kst;
- r2 = (__syscall)(n, a, b, c, d, e);
- if (r2 > -4096UL) return r2;
+ __asm__ __volatile__ (
+ "daddu $2,$0,%2 ; syscall"
+ : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7),
+ "r"(r4), "r"(r5), "r"(r6), "r"(r8)
+ : "$1", "$3", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25", "hi", "lo", "memory");
+
+ if (r7) return -r2;
+ ret = r2;
if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
- __stat_fix(&kst, (struct stat *)old_b);
+ __stat_fix(&kst, (struct stat *)b);
if (n == SYS_newfstatat)
- __stat_fix(&kst, (struct stat *)old_c);
+ __stat_fix(&kst, (struct stat *)c);
- return r2;
+ return ret;
}
static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
{
- long r2;
- long old_b = b;
- long old_c = c;
struct kernel_stat kst;
+ long ret;
+ register long r4 __asm__("$4") = a;
+ register long r5 __asm__("$5") = b;
+ register long r6 __asm__("$6") = c;
+ register long r7 __asm__("$7") = d;
+ register long r8 __asm__("$8") = e;
+ register long r9 __asm__("$9") = f;
+ register long r2 __asm__("$2");
if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
- b = (long) &kst;
+ r5 = (long) &kst;
if (n == SYS_newfstatat)
- c = (long) &kst;
+ r6 = (long) &kst;
+
+ __asm__ __volatile__ (
+ "daddu $2,$0,%2 ; syscall"
+ : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7),
+ "r"(r4), "r"(r5), "r"(r6), "r"(r8), "r"(r9)
+ : "$1", "$3", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25", "hi", "lo", "memory");
- r2 = (__syscall)(n, a, b, c, d, e, f);
- if (r2 > -4096UL) return r2;
+ if (r7) return -r2;
+ ret = r2;
if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat)
- __stat_fix(&kst, (struct stat *)old_b);
+ __stat_fix(&kst, (struct stat *)b);
if (n == SYS_newfstatat)
- __stat_fix(&kst, (struct stat *)old_c);
+ __stat_fix(&kst, (struct stat *)c);
- return r2;
+ return ret;
}
#define VDSO_USEFUL
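A note on the error convention visible in the hunk above: the kernel leaves the result (or a positive errno) in $2 and sets $7 nonzero on error, which is why the new code returns -r2 when r7 is set, replacing the old range check on the raw return value (the removed "if (r2 > -4096UL) return r2;" lines). A minimal, hypothetical consumer of such a -errno return is sketched below; demo_syscall_ret is an illustrative name, not musl's (musl does the equivalent in its internal __syscall_ret helper).

#include <errno.h>

/* Hypothetical helper: map a raw -errno return, as produced by the
 * inline wrappers above, onto the usual -1/errno convention. Unsigned
 * values above -4096UL, i.e. in [-4095,-1], are treated as errors. */
static long demo_syscall_ret(unsigned long r)
{
	if (r > -4096UL) {
		errno = -r;
		return -1;
	}
	return r;
}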
diff --git a/arch/mipsn32/syscall_arch.h b/arch/mipsn32/syscall_arch.h
index f6a1fbae..2ebf0306 100644
--- a/arch/mipsn32/syscall_arch.h
+++ b/arch/mipsn32/syscall_arch.h
@@ -1,8 +1,6 @@
#define __SYSCALL_LL_E(x) (x)
#define __SYSCALL_LL_O(x) (x)
-hidden long (__syscall)(long, ...);
-
#define SYSCALL_RLIM_INFINITY (-1UL/2)
#if _MIPSEL || __MIPSEL || __MIPSEL__
@@ -102,8 +100,18 @@ static inline long __syscall4(long n, long a, long b, long c, long d)
static inline long __syscall5(long n, long a, long b, long c, long d, long e)
{
- long r2 = (__syscall)(n, a, b, c, d, e);
- if (r2 > -4096UL) return r2;
+ register long r4 __asm__("$4") = a;
+ register long r5 __asm__("$5") = b;
+ register long r6 __asm__("$6") = c;
+ register long r7 __asm__("$7") = d;
+ register long r8 __asm__("$8") = e;
+ register long r2 __asm__("$2");
+ __asm__ __volatile__ (
+ "addu $2,$0,%2 ; syscall"
+ : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7),
+ "r"(r4), "r"(r5), "r"(r6), "r"(r8)
+ : "$1", "$3", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25", "hi", "lo", "memory");
if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat) __stat_fix(b);
if (n == SYS_newfstatat) __stat_fix(c);
return r2;
@@ -111,8 +119,19 @@ static inline long __syscall5(long n, long a, long b, long c, long d, long e)
static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
{
- long r2 = (__syscall)(n, a, b, c, d, e, f);
- if (r2 > -4096UL) return r2;
+ register long r4 __asm__("$4") = a;
+ register long r5 __asm__("$5") = b;
+ register long r6 __asm__("$6") = c;
+ register long r7 __asm__("$7") = d;
+ register long r8 __asm__("$8") = e;
+ register long r9 __asm__("$9") = f;
+ register long r2 __asm__("$2");
+ __asm__ __volatile__ (
+ "addu $2,$0,%2 ; syscall"
+ : "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7),
+ "r"(r4), "r"(r5), "r"(r6), "r"(r8), "r"(r9)
+ : "$1", "$3", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25", "hi", "lo", "memory");
if (n == SYS_stat || n == SYS_fstat || n == SYS_lstat) __stat_fix(b);
if (n == SYS_newfstatat) __stat_fix(c);
return r2;
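To close with a usage-oriented sketch: the fragment below mirrors the n32 6-argument wrapper above, with the stat translation omitted and a trailing -errno conversion added in the style of the mips64 hunks (the n32 hunk itself leaves the return path unchanged). It exercises the wrapper through SYS_mmap, which takes exactly six arguments; my_syscall6 is an illustrative name, and the code builds only with a mips compiler targeting the n32 ABI.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>

/* Hypothetical standalone sketch: inlined 6-argument syscall for the
 * n32 ABI. The sixth argument rides in $9, so $9 is an asm operand
 * here rather than a clobber, matching the hunk above. */
static inline long my_syscall6(long n, long a, long b, long c,
                               long d, long e, long f)
{
	register long r4 __asm__("$4") = a;
	register long r5 __asm__("$5") = b;
	register long r6 __asm__("$6") = c;
	register long r7 __asm__("$7") = d;
	register long r8 __asm__("$8") = e;
	register long r9 __asm__("$9") = f;
	register long r2 __asm__("$2");
	__asm__ __volatile__ (
		"addu $2,$0,%2 ; syscall"
		: "=&r"(r2), "=r"(r7) : "ir"(n), "0"(r2), "1"(r7),
		  "r"(r4), "r"(r5), "r"(r6), "r"(r8), "r"(r9)
		: "$1", "$3", "$10", "$11", "$12", "$13",
		  "$14", "$15", "$24", "$25", "hi", "lo", "memory");
	return r7 ? -r2 : r2;	/* -errno on failure, as in the mips64 hunks */
}

int main(void)
{
	/* mmap(NULL, 4096, PROT_READ|PROT_WRITE,
	 *      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) via the raw syscall */
	long r = my_syscall6(SYS_mmap, 0, 4096, PROT_READ|PROT_WRITE,
	                     MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if ((unsigned long)r > -4096UL) {
		printf("mmap failed, errno %ld\n", -r);
		return 1;
	}
	strcpy((char *)r, "six-argument syscall inlined successfully\n");
	fputs((char *)r, stdout);
	return 0;
}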