path: root/arch/riscv/kernel/patch.c
diff options
authorZong Li <>2020-04-21 15:30:01 +0800
committerPalmer Dabbelt <>2020-05-18 11:38:16 -0700
commit0ff7c3b331276f584bde3ae9a16bacd8fa3d01e6 (patch)
tree24a3643591160902f453747ebc0d8f457ad86673 /arch/riscv/kernel/patch.c
parent5303df244cbf2d9e5d37816c91c595a7afb7649a (diff)
riscv: Use text_mutex instead of patch_lock
We don't need the additional lock protection when patching the text. There are two patching interfaces here: - patch_text: patch code and always synchronize with stop_machine() - patch_text_nosync: patch code without synchronization; it is the caller's responsibility to synchronize all CPUs if needed. For the first one, stop_machine() is protected by its own mutex, and the irq is also already disabled here. For the second one, in the current RISC-V use case, it is used by ftrace to patch the mcount function; since that already runs under kstop_machine(), no other thread will run, so we can rely on text_mutex on the ftrace side. Signed-off-by: Zong Li <> Reviewed-by: Masami Hiramatsu <> Reviewed-by: Palmer Dabbelt <> Signed-off-by: Palmer Dabbelt <>
Diffstat (limited to 'arch/riscv/kernel/patch.c')
1 file changed, 7 insertions, 6 deletions
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 8acb9ae2da08..5805791cd5b5 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -5,6 +5,7 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
@@ -18,8 +19,6 @@ struct patch_insn {
-static DEFINE_RAW_SPINLOCK(patch_lock);
static void *patch_map(void *addr, int fixmap)
uintptr_t uintaddr = (uintptr_t) addr;
@@ -49,10 +48,14 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
void *waddr = addr;
bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
- unsigned long flags = 0;
int ret;
- raw_spin_lock_irqsave(&patch_lock, flags);
+	/*
+	 * The caller is expected to already hold text_mutex before
+	 * reaching here, so no additional lock is needed; holding it
+	 * keeps patching safe across all cores.
+	 */
+ lockdep_assert_held(&text_mutex);
if (across_pages)
patch_map(addr + len, FIX_TEXT_POKE1);
@@ -66,8 +69,6 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
if (across_pages)
- raw_spin_unlock_irqrestore(&patch_lock, flags);
return ret;