Mirror of https://gitee.com/bianbu-linux/linux-6.6 (synced 2025-04-24 14:07:52 -04:00)
Making virt_to_pfn() a static inline taking a strongly typed (const void *) makes the contract of passing a pointer of that type to the function explicit, and exposes any misuse of the macro virt_to_pfn() acting polymorphic and accepting many types such as (void *), (uintptr_t) or (unsigned long) as arguments without warnings.

Doing this is a bit intrusive: virt_to_pfn() requires PHYS_PFN_OFFSET and PAGE_SHIFT to be defined, and these are defined in <asm/page.h>, so it must be included *before* <asm/memory.h>.

The use of macros was obscuring the unclear inclusion order here: the macros would eventually be resolved, but a static inline like this cannot be compiled with unresolved macros.

The naive solution of including <asm/page.h> at the top of <asm/memory.h> does not work, because <asm/memory.h> sometimes includes <asm/page.h> at the end of itself, which would create a confusing inclusion loop. So instead, take the approach of always unconditionally including <asm/page.h> at the end of <asm/memory.h>.

arch/arm uses <asm/memory.h> explicitly in a lot of places; however, it turns out that if we just unconditionally include <asm/memory.h> into <asm/page.h> and switch all inclusions of <asm/memory.h> to <asm/page.h> instead, we enforce the right order and <asm/memory.h> will always have access to the definitions. Put an inclusion guard in place making it impossible to include <asm/memory.h> explicitly.

Link: https://lore.kernel.org/linux-mm/20220701160004.2ffff4e5ab59a55499f4c736@linux-foundation.org/
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
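For illustration, the change described above boils down to replacing a cast-happy macro with a typed helper. The following is a minimal sketch of the idea, not the exact arm implementation; it uses the generic __pa()/PAGE_SHIFT form and assumes both are already visible from <asm/page.h>:

/* Old style: a polymorphic macro, silently accepting (void *), (uintptr_t),
 * (unsigned long), ... without any diagnostic. */
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)

/* New style: a strongly typed static inline; passing an integer type such as
 * (unsigned long) now triggers a compiler warning, and PAGE_SHIFT must
 * already be defined when this function is parsed. */
static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return __pa(kaddr) >> PAGE_SHIFT;
}

The two definitions are alternatives (before and after), not meant to coexist in one header.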
111 lines
2.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __ASM_UACCESS_ASM_H__
#define __ASM_UACCESS_ASM_H__

#include <asm/asm-offsets.h>
#include <asm/domain.h>
#include <asm/page.h>
#include <asm/thread_info.h>

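	/*
	 * csdb - "consume speculative data" barrier. The raw .inst encodings
	 * (Thumb-2 and ARM respectively) are used so this assembles even with
	 * toolchains that do not know the CSDB mnemonic.
	 */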
	.macro csdb
#ifdef CONFIG_THUMB2_KERNEL
	.inst.w	0xf3af8014
#else
	.inst	0xe320f014
#endif
	.endm

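	/*
	 * check_uaccess - branch to \bad unless \addr + \size - 1 lies below
	 * \limit. With CONFIG_CPU_SPECTRE the out-of-range pointer is also
	 * zeroed and a csdb issued, so a mispredicted branch cannot
	 * speculatively dereference it.
	 */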
	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1
	sbcscc	\tmp, \tmp, \limit
	bcs	\bad
#ifdef CONFIG_CPU_SPECTRE
	movcs	\addr, #0
	csdb
#endif
#endif
	.endm

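	/*
	 * uaccess_mask_range_ptr - Spectre-safe masking of a user pointer:
	 * if \addr + \size would run past \limit, \addr is replaced with
	 * NULL (see the inline @ comments), and csdb keeps speculation from
	 * using the unmasked value.
	 */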
	.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
#ifdef CONFIG_CPU_SPECTRE
	sub	\tmp, \limit, #1
	subs	\tmp, \tmp, \addr	@ tmp = limit - 1 - addr
	addhs	\tmp, \tmp, #1		@ if (tmp >= 0) {
	subshs	\tmp, \tmp, \size	@ tmp = limit - (addr + size) }
	movlo	\addr, #0		@ if (tmp < 0) addr = NULL
	csdb
#endif
	.endm

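	/*
	 * uaccess_disable / uaccess_enable - with SW PAN, point the DACR user
	 * domain at "no access" or "client" respectively, so kernel code
	 * cannot (or can) touch userspace mappings; \isb controls whether an
	 * instruction barrier follows the DACR write.
	 */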
	.macro uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_DISABLE
	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_ENABLE
	mcr	p15, 0, \tmp, c3, c0, 0
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

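/*
 * DACR(x...) expands to its argument only when the kernel actually manages
 * the Domain Access Control Register (SW PAN or CONFIG_CPU_USE_DOMAINS);
 * otherwise the DACR save/restore in uaccess_entry/uaccess_exit below
 * compiles away to nothing.
 */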
#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
#define DACR(x...)	x
#else
#define DACR(x...)
#endif

	/*
	 * Save the address limit on entry to a privileged exception.
	 *
	 * If we are using the DACR for kernel access by the user accessors
	 * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
	 * back to client mode, whether or not \disable is set.
	 *
	 * If we are using SW PAN, set the DACR user domain to no access
	 * if \disable is set.
	 */
	.macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
 DACR(	mrc	p15, 0, \tmp0, c3, c0, 0)
 DACR(	str	\tmp0, [sp, #SVC_DACR])
	.if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
	/* kernel=client, user=no access */
	mov	\tmp2, #DACR_UACCESS_DISABLE
	mcr	p15, 0, \tmp2, c3, c0, 0
	instr_sync
	.elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
	/* kernel=client */
	bic	\tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
	orr	\tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
	mcr	p15, 0, \tmp2, c3, c0, 0
	instr_sync
	.endif
	.endm

	/* Restore the user access state previously saved by uaccess_entry */
	.macro uaccess_exit, tsk, tmp0, tmp1
 DACR(	ldr	\tmp0, [sp, #SVC_DACR])
 DACR(	mcr	p15, 0, \tmp0, c3, c0, 0)
	.endm

#undef DACR

#endif /* __ASM_UACCESS_ASM_H__ */