author		Guo Ren <ren_guo@c-sky.com>	2018-09-05 14:25:10 +0800
committer	Guo Ren <ren_guo@c-sky.com>	2018-10-25 23:36:19 +0800
commit		00a9730e1007c6cc87a7c78af2f24a4105d616ee (patch)
tree		c014e5a0606a7a88b6e3493f49862c040f9aeea8 /arch/csky/abiv2
parent		4859bfca11c7d63d55175bcd85a75d6cee4b7184 (diff)
download	linux-sh-00a9730e1007c6cc87a7c78af2f24a4105d616ee.tar.gz
csky: Cache and TLB routines
This patch adds the cache and TLB sync code for abiv1 & abiv2.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/csky/abiv2')
-rw-r--r--	arch/csky/abiv2/cacheflush.c		60
-rw-r--r--	arch/csky/abiv2/inc/abi/cacheflush.h	46
2 files changed, 106 insertions, 0 deletions
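
Not part of the patch: a minimal sketch, modeled on the access_process_vm()
write path in mm/memory.c, of how core code consumes the copy_to_user_page()
helper added below when it writes into another task's page. The function name
is illustrative only; on csky/abiv2 copy_to_user_page() is the memcpy() plus
cache_wbinv_range() defined in cacheflush.h.

#include <linux/mm.h>
#include <linux/highmem.h>

/* Illustrative consumer: write 'bytes' into a user page at 'offset', then
 * let the arch's copy_to_user_page() make the new contents coherent for a
 * task that may execute them. */
static void example_write_user_page(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    const void *buf, int bytes, int offset)
{
	void *maddr = kmap(page);

	copy_to_user_page(vma, page, addr, maddr + offset, (void *)buf, bytes);
	set_page_dirty_lock(page);
	kunmap(page);
}
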
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
new file mode 100644
index 000000000000..d22c95ffc74d
--- /dev/null
+++ b/arch/csky/abiv2/cacheflush.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/cache.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <asm/cache.h>
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ unsigned long start;
+
+ start = (unsigned long) kmap_atomic(page);
+
+ cache_wbinv_range(start, start + PAGE_SIZE);
+
+ kunmap_atomic((void *)start);
+}
+
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, int len)
+{
+ unsigned long kaddr;
+
+ kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);
+
+ cache_wbinv_range(kaddr, kaddr + len);
+
+ kunmap_atomic((void *)kaddr);
+}
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *pte)
+{
+ unsigned long addr, pfn;
+ struct page *page;
+ void *va;
+
+ if (!(vma->vm_flags & VM_EXEC))
+ return;
+
+ pfn = pte_pfn(*pte);
+ if (unlikely(!pfn_valid(pfn)))
+ return;
+
+ page = pfn_to_page(pfn);
+ if (page == ZERO_PAGE(0))
+ return;
+
+ va = page_address(page);
+ addr = (unsigned long) va;
+
+ if (va == NULL && PageHighMem(page))
+ addr = (unsigned long) kmap_atomic(page);
+
+ cache_wbinv_range(addr, addr + PAGE_SIZE);
+
+ if (va == NULL && PageHighMem(page))
+ kunmap_atomic((void *) addr);
+}
diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
new file mode 100644
index 000000000000..b8db5e0b2fe3
--- /dev/null
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ABI_CSKY_CACHEFLUSH_H
+#define __ABI_CSKY_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+
+/*
+ * The cache doesn't need to be flushed when TLB entries change,
+ * because the cache is indexed by physical address, not virtual address.
+ */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+
+#define flush_cache_range(vma, start, end) \
+ do { \
+ if (vma->vm_flags & VM_EXEC) \
+ icache_inv_all(); \
+ } while (0)
+
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#define flush_icache_range(start, end) cache_wbinv_range(start, end)
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long vaddr, int len);
+
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy(dst, src, len); \
+ cache_wbinv_range((unsigned long)dst, (unsigned long)dst + len); \
+} while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#endif /* __ABI_CSKY_CACHEFLUSH_H */
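
Not part of the patch: per the comment in cacheflush.h above, the data cache is
physically indexed, so most D-cache maintenance hooks can be no-ops; only code
that writes instructions must write back the D-cache and invalidate the I-cache
over the affected range. A hypothetical sketch of such a caller (the function
name is made up; flush_icache_range() expands to cache_wbinv_range() per the
header above):

#include <linux/string.h>
#include <asm/cacheflush.h>

/* Hypothetical text-patching helper: copy new instructions, then make
 * them visible to instruction fetch over the patched range. */
static void example_patch_text(void *addr, const void *insns, size_t len)
{
	memcpy(addr, insns, len);
	flush_icache_range((unsigned long)addr, (unsigned long)addr + len);
}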