author     Guo Ren <guoren@linux.alibaba.com>    2020-01-31 20:33:10 +0800
committer  Guo Ren <guoren@linux.alibaba.com>    2020-02-21 15:43:24 +0800
commit     997153b9a75c08d545ad45e6f8ceb432435d2425 (patch)
tree       830915f73d05e7d094d2f293a712e05c0da1e6f5 /arch/csky/abiv2/cacheflush.c
parent     cc1f6563a92ced0889775d0587316d725b6e1a68 (diff)
csky: Add flush_icache_mm to defer flush icache all
Some CPUs don't support the icache.va instruction to maintain the icache of all SMP cores. Using icache.all plus an IPI costs a lot in performance, so use a defer mechanism to reduce the number of calls to the icache_flush_all functions.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Diffstat (limited to 'arch/csky/abiv2/cacheflush.c')
-rw-r--r--    arch/csky/abiv2/cacheflush.c    55
1 file changed, 55 insertions(+), 0 deletions(-)
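
The point of the defer mechanism is that flush_icache_mm_range() only invalidates the icache of cores currently running the mm; every other core is just marked stale in mm->context.icache_stale_mask and pays the icache.all cost later, the next time it switches to that mm. The hook that consumes the mask is not in this hunk; below is a minimal sketch of how it could plug into the context-switch path, assuming flush_icache_deferred() is called from a switch_mm() implementation (the page-table/ASID step is elided and purely illustrative):

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	if (prev != next)
		cpumask_set_cpu(cpu, mm_cpumask(next));

	/* ... arch-specific page table / ASID switch for 'next' ... */

	/*
	 * If another core flushed the icache for 'next' while this core was
	 * not running it, this core is marked in
	 * next->context.icache_stale_mask; invalidate the local icache now,
	 * before user code of 'next' runs here.
	 */
	flush_icache_deferred(next);
}
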
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index ba469953a16e..790f1ebfba44 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -28,3 +28,58 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
kunmap_atomic((void *) addr);
}
+
+void flush_icache_deferred(struct mm_struct *mm)
+{
+ unsigned int cpu = smp_processor_id();
+ cpumask_t *mask = &mm->context.icache_stale_mask;
+
+ if (cpumask_test_cpu(cpu, mask)) {
+ cpumask_clear_cpu(cpu, mask);
+ /*
+ * Ensure the remote hart's writes are visible to this hart.
+ * This pairs with a barrier in flush_icache_mm.
+ */
+ smp_mb();
+ local_icache_inv_all(NULL);
+ }
+}
+
+void flush_icache_mm_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ unsigned int cpu;
+ cpumask_t others, *mask;
+
+ preempt_disable();
+
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
+ if (mm == current->mm) {
+ icache_inv_range(start, end);
+ preempt_enable();
+ return;
+ }
+#endif
+
+ /* Mark every hart's icache as needing a flush for this MM. */
+ mask = &mm->context.icache_stale_mask;
+ cpumask_setall(mask);
+
+ /* Flush this hart's I$ now, and mark it as flushed. */
+ cpu = smp_processor_id();
+ cpumask_clear_cpu(cpu, mask);
+ local_icache_inv_all(NULL);
+
+ /*
+ * Flush the I$ of other harts concurrently executing, and mark them as
+ * flushed.
+ */
+ cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+
+ if (mm != current->active_mm || !cpumask_empty(&others)) {
+ on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
+ cpumask_clear(mask);
+ }
+
+ preempt_enable();
+}
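
The new code also relies on a per-mm cpumask, mm->context.icache_stale_mask, which is not defined in this file. A rough sketch of the assumed mm_context_t addition and its initialization follows; the surrounding fields and the use of init_new_context() as the init point are assumptions, not part of this hunk:

typedef struct {
	/* ... existing csky mm context fields ... */
	cpumask_t icache_stale_mask;	/* cores whose icache may be stale for this mm */
} mm_context_t;

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* A fresh mm starts with no core considered stale. */
	cpumask_clear(&mm->context.icache_stale_mask);
	return 0;
}

With this layout, flush_icache_mm_range() sets every bit in the mask, clears the local core after its own invalidate, and clears the remaining bits only when it actually IPIs the other cores; any core skipped here finds its bit still set and invalidates its icache in flush_icache_deferred() later.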