author	Rich Felker <dalias@libc.org>	2016-03-22 22:02:23 +0000
committer	Rich Felker <dalias@libc.org>	2016-04-11 22:55:42 +0000
commit	63b9ae8deb759471bf9ff8f22ccb0e0749e9ebcc (patch)
tree	a8a634db707b27a5e1b31c35028e5e0c7b98c4fb
parent	4b24b908964ecc8d402f3e7e0388af3d602bc3c0 (diff)
sh: disable aliased page logic on NOMMU models
SH3/4 (with MMU) have a virtually indexed cache, requiring explicit
work to avoid consistency problems arising from having the same
physical address range cached in multiple cache lines. This is
unneeded for the NOMMU case, and some of the resulting code paths
(kmap_coherent) don't work. SH2 only avoided this problem by having a
4-way associative cache with way size equal to the page size (4k),
yielding no cache index bits outside of the page offset and thus no
aliases.

Signed-off-by: Rich Felker <dalias@libc.org>
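The arithmetic behind that claim is easy to check. Below is a minimal
standalone sketch (not kernel code) mirroring the compute_alias() logic
from arch/sh/mm/cache.c; the two cache geometries are illustrative
stand-ins for an aliasing SH4-style cache and the SH2 case described
above, not authoritative hardware parameters:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Set-index bits above the page offset come from the virtual address;
 * those bits can differ between two virtual mappings of the same
 * physical page, producing cache aliases. */
static unsigned long alias_mask(unsigned long sets, unsigned long entry_shift)
{
	return ((sets - 1) << entry_shift) & ~(PAGE_SIZE - 1);
}

static unsigned long n_aliases(unsigned long mask)
{
	return mask ? (mask >> PAGE_SHIFT) + 1 : 0;
}

int main(void)
{
	/* Illustrative SH4-style geometry: 16k way, 32-byte lines
	 * (512 sets) -> two index bits above the 4k page offset. */
	printf("16k way: %lu aliases\n", n_aliases(alias_mask(512, 5)));

	/* SH2 case: way size == page size (4k), e.g. 256 sets of
	 * 16 bytes -> no index bits above the page offset, so no
	 * aliases regardless of virtual address. */
	printf("4k way:  %lu aliases\n", n_aliases(alias_mask(256, 4)));
	return 0;
}

The first line prints 4 aliases, the second 0, matching the commit
message: once the way size does not exceed the page size, the alias
mask collapses to zero.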
-rw-r--r--	arch/sh/kernel/cpu/init.c	4
-rw-r--r--	arch/sh/mm/cache.c	4
2 files changed, 8 insertions, 0 deletions
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 0d7360d549c1..bfd9e2798008 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -323,9 +323,13 @@ asmlinkage void cpu_init(void)
 	cache_init();
 
 	if (raw_smp_processor_id() == 0) {
+#ifdef CONFIG_MMU
 		shm_align_mask = max_t(unsigned long,
 				       current_cpu_data.dcache.way_size - 1,
 				       PAGE_SIZE - 1);
+#else
+		shm_align_mask = PAGE_SIZE - 1;
+#endif
 
 		/* Boot CPU sets the cache shape */
 		detect_cache_shape();
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index e58cfbf45150..776d664a40c5 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -244,7 +244,11 @@ void flush_cache_sigtramp(unsigned long address)
 
 static void compute_alias(struct cache_info *c)
 {
+#ifdef CONFIG_MMU
 	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
+#else
+	c->alias_mask = 0;
+#endif
 	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
 }
 
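As context for the init.c hunk: shm_align_mask feeds the cache
colouring of shared mappings at mmap time. The sketch below is modeled
loosely on the COLOUR_ALIGN logic in arch/sh/mm/mmap.c; it is a
simplification, and the helper name colour_align here is illustrative,
not the kernel's API:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Round a candidate mapping address up so that the file offset
 * (pgoff) always lands on the same cache index, no matter which
 * virtual address the mapping is given. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff,
                                  unsigned long shm_align_mask)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off  = (pgoff << PAGE_SHIFT) & shm_align_mask;
	return base + off;
}

int main(void)
{
	/* With the NOMMU value set by this patch, shm_align_mask ==
	 * PAGE_SIZE - 1, and colouring degenerates to plain page
	 * alignment -- which is why the aliased-page logic can go. */
	printf("%#lx\n", colour_align(0x10001234, 3, PAGE_SIZE - 1));

	/* An aliasing cache with a 16k way (illustrative) would need
	 * 16k colouring instead. */
	printf("%#lx\n", colour_align(0x10001234, 3, 0x3fff));
	return 0;
}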