author	Rich Felker <dalias@aerifal.cx>	2014-04-02 14:13:20 -0400
committer	Rich Felker <dalias@aerifal.cx>	2014-04-02 14:13:20 -0400
commit	91d5aa06572d2660122f9a06ed242fef0383f292 (patch)
tree	9149f4c55959571f3bb87eff4d38230875f79828 /arch/microblaze
parent	2b74315d8a31ad8fbcd369116c82e055e0ec3fb7 (diff)
download	musl-91d5aa06572d2660122f9a06ed242fef0383f292.tar.gz
fix microblaze syscall register clobbers
the kernel entry point for syscalls on microblaze nominally saves and restores all registers, and testing on qemu always worked, since qemu behaves the same way. however, the real kernel treats the r3:r4 pair as a potential 64-bit return value from the syscall function, and copies both registers over top of the saved ones before returning to userspace. thus, r4 must be treated as always-clobbered.
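
for reference, here is the fixed zero-argument wrapper reassembled from the hunks below; the declaration of r12 (the syscall-number register) is assumed from the surrounding file, which this page does not show in full:

static inline long __syscall0(long n)
{
	register unsigned long r12 __asm__("r12") = n; /* syscall number */
	register unsigned long r3 __asm__("r3");       /* low half of return value */
	/* "brki r14, 0x8" traps into the kernel. the kernel may write the
	 * high half of a 64-bit r3:r4 return pair into r4, so r4 is listed
	 * as clobbered alongside "memory". */
	__asm__ __volatile__ ("brki r14, 0x8" : "=r"(r3)
		: "r"(r12)
		: "memory", "r4");
	return r3;
}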
Diffstat (limited to 'arch/microblaze')
-rw-r--r--	arch/microblaze/syscall_arch.h	14
1 file changed, 7 insertions, 7 deletions
diff --git a/arch/microblaze/syscall_arch.h b/arch/microblaze/syscall_arch.h
index 70217ffa..cab4607d 100644
--- a/arch/microblaze/syscall_arch.h
+++ b/arch/microblaze/syscall_arch.h
@@ -13,7 +13,7 @@ static __inline long __syscall0(long n)
 	register unsigned long r3 __asm__("r3");
 	__asm__ __volatile__ ("brki r14, 0x8" : "=r"(r3)
 		: "r"(r12)
-		: "memory");
+		: "memory", "r4");
 	return r3;
 }
 
@@ -24,7 +24,7 @@ static inline long __syscall1(long n, long a)
 	register unsigned long r5 __asm__("r5") = a;
 	__asm__ __volatile__ ("brki r14, 0x8" : "=r"(r3)
 		: "r"(r12), "r"(r5)
-		: "memory");
+		: "memory", "r4");
 	return r3;
 }
 
@@ -36,7 +36,7 @@ static inline long __syscall2(long n, long a, long b)
 	register unsigned long r6 __asm__("r6") = b;
 	__asm__ __volatile__ ("brki r14, 0x8" : "=r"(r3)
 		: "r"(r12), "r"(r5), "r"(r6)
-		: "memory");
+		: "memory", "r4");
 	return r3;
 }
 
@@ -49,7 +49,7 @@ static inline long __syscall3(long n, long a, long b, long c)
 	register unsigned long r7 __asm__("r7") = c;
 	__asm__ __volatile__ ("brki r14, 0x8" : "=r"(r3)
 		: "r"(r12), "r"(r5), "r"(r6), "r"(r7)
-		: "memory");
+		: "memory", "r4");
 	return r3;
 }
 
@@ -63,7 +63,7 @@ static inline long __syscall4(long n, long a, long b, long c, long d)
 	register unsigned long r8 __asm__("r8") = d;
 	__asm__ __volatile__ ("brki r14, 0x8" : "=r"(r3)
 		: "r"(r12), "r"(r5), "r"(r6), "r"(r7), "r"(r8)
-		: "memory");
+		: "memory", "r4");
 	return r3;
 }
 
@@ -78,7 +78,7 @@ static inline long __syscall5(long n, long a, long b, long c, long d, long e)
 	register unsigned long r9 __asm__("r9") = e;
 	__asm__ __volatile__ ("brki r14, 0x8" : "=r"(r3)
 		: "r"(r12), "r"(r5), "r"(r6), "r"(r7), "r"(r8), "r"(r9)
-		: "memory");
+		: "memory", "r4");
 	return r3;
 }
 
@@ -94,7 +94,7 @@ static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
 	register unsigned long r10 __asm__("r10") = f;
 	__asm__ __volatile__ ("brki r14, 0x8" : "=r"(r3)
 		: "r"(r12), "r"(r5), "r"(r6), "r"(r7), "r"(r8), "r"(r9), "r"(r10)
-		: "memory");
+		: "memory", "r4");
 	return r3;
 }
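
to see why the missing clobber is a real bug, consider a hypothetical caller (illustrative only; the name, the syscall chosen, and the register assignment are assumptions, not taken from musl):

/* without "r4" in the clobber list, the compiler is free to keep
 * 'live' in r4 across the inline asm. the real kernel copies the
 * r3:r4 return pair over the saved registers before returning to
 * userspace, so 'live' would come back corrupted. */
static long example(long n)
{
	long live = n * 3;      /* hypothetically allocated to r4 */
	__syscall0(SYS_getpid); /* kernel may overwrite r4 on return */
	return live;            /* stale under the old constraints */
}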