path: root/arch/csky/abiv1/inc
diff options
authorGuo Ren <>2019-08-20 12:47:24 +0800
committerGuo Ren <>2019-08-20 20:09:14 +0800
commitdc140045c0cace809af872e3799e8fbe1b7d7f86 (patch)
treebef3a506c2b97019e8ad3b4aabae6239be02e953 /arch/csky/abiv1/inc
parentbe819aa6f11145de32dab8690ec6055348488c18 (diff)
csky: Fixup defer cache flush for 610
We use defer cache flush mechanism to improve the performance of 610, but the implementation is wrong. We fix it up now and update the mechanism: - Zero page needn't be flushed. - If page is file mapping & non-touched in user space, defer flush. - If page is anon mapping or dirty file mapping, flush immediately. - In update_mmu_cache finish the defer flush by flush_dcache_page(). For 610 we need take care the dcache aliasing issue: - VIPT cache with 8K-bytes size per way in 4K page granularity. Signed-off-by: Guo Ren <> Cc: Arnd Bergmann <>
Diffstat (limited to 'arch/csky/abiv1/inc')
1 file changed, 2 insertions, 2 deletions
diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h
index 5f663aef9b1b..fce5604cef40 100644
--- a/arch/csky/abiv1/inc/abi/cacheflush.h
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -26,8 +26,8 @@ extern void flush_dcache_page(struct page *);
#define flush_icache_page(vma, page) cache_wbinv_all()
#define flush_icache_range(start, end) cache_wbinv_range(start, end)
-#define flush_icache_user_range(vma, pg, adr, len) \
- cache_wbinv_range(adr, adr + len)
+#define flush_icache_user_range(vma,page,addr,len) \
+ flush_dcache_page(page)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \