bianbu-linux-6.6/arch/sparc/include/asm/cacheflush_32.h
Alexandre Ghiti c4a05cf0ed mm: Introduce flush_cache_vmap_early()
[ Upstream commit 7a92fc8b4d20680e4c20289a670d8fca2d1f2c1b ]

The pcpu setup, when using the page allocator, establishes a new
vmalloc mapping very early in the boot process, so early that it
cannot use flush_cache_vmap(), which may depend on structures that are
not yet initialized (for example, on riscv we currently send an IPI to
flush the other CPUs' TLBs).

But on some architectures flush_cache_vmap() must be called: for
example, on riscv, some uarchs can cache invalid TLB entries, so we
need to flush the newly established mapping to avoid taking an
exception.

So fix this by introducing a new function, flush_cache_vmap_early(),
which is called right after the new page table entries are set and
before this new mapping is accessed. On riscv this new function
performs a local TLB flush; on all other architectures it is a no-op
(i.e. the same behaviour as today).
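
A sketch of the shape of the change, reconstructed from the
description above (the exact hunks in include/asm-generic/cacheflush.h,
arch/riscv/include/asm/cacheflush.h and mm/percpu.c may differ):

	/* Generic fallback: a no-op unless the architecture overrides
	 * it (assumed form of the asm-generic default).
	 */
	#ifndef flush_cache_vmap_early
	static inline void flush_cache_vmap_early(unsigned long start,
						  unsigned long end)
	{
	}
	#endif

	/* Assumed riscv override: a purely local TLB flush, safe to
	 * issue before the IPI machinery is up.
	 */
	#define flush_cache_vmap_early(start, end) \
		local_flush_tlb_kernel_range(start, end)

The early percpu first-chunk setup then calls flush_cache_vmap_early()
instead of flush_cache_vmap() right after populating the unit's page
tables, and the sparc32 header below gains its own no-op definition of
flush_cache_vmap_early().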

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Stable-dep-of: d9807d60c145 ("riscv: mm: execute local TLB flush after populating vmemmap")
Signed-off-by: Sasha Levin <sashal@kernel.org>

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_CACHEFLUSH_H
#define _SPARC_CACHEFLUSH_H

#include <linux/page-flags.h>

#include <asm/cachetlb_32.h>

#define flush_cache_all() \
	sparc32_cachetlb_ops->cache_all()
#define flush_cache_mm(mm) \
	sparc32_cachetlb_ops->cache_mm(mm)
#define flush_cache_dup_mm(mm) \
	sparc32_cachetlb_ops->cache_mm(mm)
#define flush_cache_range(vma,start,end) \
	sparc32_cachetlb_ops->cache_range(vma, start, end)
#define flush_cache_page(vma,addr,pfn) \
	sparc32_cachetlb_ops->cache_page(vma, addr)
#define flush_icache_range(start, end)		do { } while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
	} while (0)

#define __flush_page_to_ram(addr) \
	sparc32_cachetlb_ops->page_to_ram(addr)
#define flush_sig_insns(mm,insn_addr) \
	sparc32_cachetlb_ops->sig_insns(mm, insn_addr)
#define flush_page_for_dma(addr) \
	sparc32_cachetlb_ops->page_for_dma(addr)

void sparc_flush_page_to_ram(struct page *page);
void sparc_flush_folio_to_ram(struct folio *folio);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_folio(folio) sparc_flush_folio_to_ram(folio)
static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		flush_cache_all()

/* When a context switch happens we must flush all user windows so that
 * the windows of the current process are flushed onto its stack. This
 * way the windows are all clean for the next process and the stack
 * frames are up to date.
 */
void flush_user_windows(void);
void kill_user_windows(void);
void flushw_all(void);

#endif /* _SPARC_CACHEFLUSH_H */