riscv: Use SYM_*() assembly macros instead of deprecated ones
ENTRY()/END()/WEAK() macros are deprecated and we should make use of the new SYM_*() macros [1] for better annotation of symbols. Replace the deprecated ones with the new ones and fix wrong usage of END()/ENDPROC() to correctly describe the symbols.

[1] https://docs.kernel.org/core-api/asm-annotations.html

Change-Id: If9fd7dd8ee36d94d26890a68bd30f22a92c3abd1
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Link: https://lore.kernel.org/r/20231024132655.730417-3-cleger@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
This commit: ef96562cc3 (parent: 6b9a31c67e)

17 changed files with 60 additions and 73 deletions
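For reference, the conversion applied throughout the diff below follows the mapping described in the asm-annotations document [1]. A minimal sketch of the pattern, using hypothetical symbols (my_func and my_entry are illustrative, not taken from this commit):

    /* Before: deprecated annotation macros */
    ENTRY(my_func)              /* global entry point */
    	ret
    ENDPROC(my_func)            /* END() was also misused to close functions */

    /* After: C-callable functions use the FUNC variants */
    SYM_FUNC_START(my_func)
    	ret
    SYM_FUNC_END(my_func)       /* emits size/type information for my_func */

    /* Code that does not follow the C calling convention uses the CODE
     * variants, as done for _start and reset_regs below */
    SYM_CODE_START(my_entry)
    	j .                     /* placeholder body */
    SYM_CODE_END(my_entry)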
--- a/arch/riscv/kernel/copy-unaligned.S
+++ b/arch/riscv/kernel/copy-unaligned.S
@@ -9,7 +9,7 @@
 /* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
 /* Performs a memcpy without aligning buffers, using word loads and stores. */
 /* Note: The size is truncated to a multiple of 8 * SZREG */
-ENTRY(__riscv_copy_words_unaligned)
+SYM_FUNC_START(__riscv_copy_words_unaligned)
 	andi a4, a2, ~((8*SZREG)-1)
 	beqz a4, 2f
 	add a3, a1, a4
@@ -36,12 +36,12 @@ ENTRY(__riscv_copy_words_unaligned)
 
 2:
 	ret
-END(__riscv_copy_words_unaligned)
+SYM_FUNC_END(__riscv_copy_words_unaligned)
 
 /* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
 /* Performs a memcpy without aligning buffers, using only byte accesses. */
 /* Note: The size is truncated to a multiple of 8 */
-ENTRY(__riscv_copy_bytes_unaligned)
+SYM_FUNC_START(__riscv_copy_bytes_unaligned)
 	andi a4, a2, ~(8-1)
 	beqz a4, 2f
 	add a3, a1, a4
@@ -68,4 +68,4 @@ ENTRY(__riscv_copy_bytes_unaligned)
 
 2:
 	ret
-END(__riscv_copy_bytes_unaligned)
+SYM_FUNC_END(__riscv_copy_bytes_unaligned)
--- a/arch/riscv/kernel/fpu.S
+++ b/arch/riscv/kernel/fpu.S
@@ -19,7 +19,7 @@
 #include <asm/csr.h>
 #include <asm/asm-offsets.h>
 
-ENTRY(__fstate_save)
+SYM_FUNC_START(__fstate_save)
 	li a2, TASK_THREAD_F0
 	add a0, a0, a2
 	li t1, SR_FS
@@ -60,9 +60,9 @@ ENTRY(__fstate_save)
 	sw t0, TASK_THREAD_FCSR_F0(a0)
 	csrc CSR_STATUS, t1
 	ret
-ENDPROC(__fstate_save)
+SYM_FUNC_END(__fstate_save)
 
-ENTRY(__fstate_restore)
+SYM_FUNC_START(__fstate_restore)
 	li a2, TASK_THREAD_F0
 	add a0, a0, a2
 	li t1, SR_FS
@@ -103,7 +103,7 @@ ENTRY(__fstate_restore)
 	fscsr t0
 	csrc CSR_STATUS, t1
 	ret
-ENDPROC(__fstate_restore)
+SYM_FUNC_END(__fstate_restore)
 
 #define get_f32(which) fmv.x.s a0, which; j 2f
 #define put_f32(which) fmv.s.x which, a1; j 2f
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -19,7 +19,7 @@
 #include "efi-header.S"
 
 __HEAD
-ENTRY(_start)
+SYM_CODE_START(_start)
 	/*
 	 * Image header expected by Linux boot-loaders. The image header data
 	 * structure is described in asm/image.h.
@@ -188,9 +188,9 @@ secondary_start_sbi:
 	wfi
 	j .Lsecondary_park
 
-END(_start)
+SYM_CODE_END(_start)
 
-ENTRY(_start_kernel)
+SYM_CODE_START(_start_kernel)
 	/* Mask all interrupts */
 	csrw CSR_IE, zero
 	csrw CSR_IP, zero
@@ -352,10 +352,10 @@ ENTRY(_start_kernel)
 	tail .Lsecondary_start_common
 #endif /* CONFIG_RISCV_BOOT_SPINWAIT */
 
-END(_start_kernel)
+SYM_CODE_END(_start_kernel)
 
 #ifdef CONFIG_RISCV_M_MODE
-ENTRY(reset_regs)
+SYM_CODE_START_LOCAL(reset_regs)
 	li sp, 0
 	li gp, 0
 	li tp, 0
@@ -453,5 +453,5 @@ ENTRY(reset_regs)
 .Lreset_regs_done_vector:
 #endif /* CONFIG_RISCV_ISA_V */
 	ret
-END(reset_regs)
+SYM_CODE_END(reset_regs)
 #endif /* CONFIG_RISCV_M_MODE */
--- a/arch/riscv/kernel/hibernate-asm.S
+++ b/arch/riscv/kernel/hibernate-asm.S
@@ -21,7 +21,7 @@
  *
  * Always returns 0
  */
-ENTRY(__hibernate_cpu_resume)
+SYM_FUNC_START(__hibernate_cpu_resume)
 	/* switch to hibernated image's page table. */
 	csrw CSR_SATP, s0
 	sfence.vma
@@ -34,7 +34,7 @@ ENTRY(__hibernate_cpu_resume)
 	mv a0, zero
 
 	ret
-END(__hibernate_cpu_resume)
+SYM_FUNC_END(__hibernate_cpu_resume)
 
 /*
  * Prepare to restore the image.
@@ -42,7 +42,7 @@ END(__hibernate_cpu_resume)
  * a1: satp of temporary page tables.
  * a2: cpu_resume.
  */
-ENTRY(hibernate_restore_image)
+SYM_FUNC_START(hibernate_restore_image)
 	mv s0, a0
 	mv s1, a1
 	mv s2, a2
@@ -50,7 +50,7 @@ ENTRY(hibernate_restore_image)
 	REG_L a1, relocated_restore_code
 
 	jr a1
-END(hibernate_restore_image)
+SYM_FUNC_END(hibernate_restore_image)
 
 /*
  * The below code will be executed from a 'safe' page.
@@ -58,7 +58,7 @@ END(hibernate_restore_image)
  * back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
  * to restore the CPU context.
  */
-ENTRY(hibernate_core_restore_code)
+SYM_FUNC_START(hibernate_core_restore_code)
 	/* switch to temp page table. */
 	csrw satp, s1
 	sfence.vma
@@ -73,4 +73,4 @@ ENTRY(hibernate_core_restore_code)
 	bnez s4, .Lcopy
 
 	jr s2
-END(hibernate_core_restore_code)
+SYM_FUNC_END(hibernate_core_restore_code)
--- a/arch/riscv/kernel/mcount-dyn.S
+++ b/arch/riscv/kernel/mcount-dyn.S
@@ -82,7 +82,7 @@
 	.endm
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 
-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
 	SAVE_ABI
 
 	addi a0, t0, -FENTRY_RA_OFFSET
@@ -91,8 +91,7 @@ ENTRY(ftrace_caller)
 	mv a1, ra
 	mv a3, sp
 
-ftrace_call:
-	.global ftrace_call
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
 	call ftrace_stub
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,16 +101,15 @@ ftrace_call:
 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 	mv a2, s0
 #endif
-ftrace_graph_call:
-	.global ftrace_graph_call
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
 	call ftrace_stub
 #endif
 	RESTORE_ABI
 	jr t0
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)
 
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
-ENTRY(ftrace_regs_caller)
+SYM_FUNC_START(ftrace_regs_caller)
 	SAVE_ALL
 
 	addi a0, t0, -FENTRY_RA_OFFSET
@@ -120,8 +118,7 @@ ENTRY(ftrace_regs_caller)
 	mv a1, ra
 	mv a3, sp
 
-ftrace_regs_call:
-	.global ftrace_regs_call
+SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
 	call ftrace_stub
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -131,12 +128,11 @@ ftrace_regs_call:
 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 	mv a2, s0
 #endif
-ftrace_graph_regs_call:
-	.global ftrace_graph_regs_call
+SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL)
 	call ftrace_stub
 #endif
 
 	RESTORE_ALL
 	jr t0
-ENDPROC(ftrace_regs_caller)
+SYM_FUNC_END(ftrace_regs_caller)
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -61,7 +61,7 @@ SYM_TYPED_FUNC_START(ftrace_stub_graph)
 	ret
 SYM_FUNC_END(ftrace_stub_graph)
 
-ENTRY(return_to_handler)
+SYM_FUNC_START(return_to_handler)
 /*
  * On implementing the frame point test, the ideal way is to compare the
  * s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return.
@@ -76,11 +76,11 @@ ENTRY(return_to_handler)
 	mv a2, a0
 	RESTORE_RET_ABI_STATE
 	jalr a2
-ENDPROC(return_to_handler)
+SYM_FUNC_END(return_to_handler)
 #endif
 
 #ifndef CONFIG_DYNAMIC_FTRACE
-ENTRY(MCOUNT_NAME)
+SYM_FUNC_START(MCOUNT_NAME)
 	la t4, ftrace_stub
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	la t0, ftrace_graph_return
@@ -126,6 +126,6 @@ ENTRY(MCOUNT_NAME)
 	jalr t5
 	RESTORE_ABI_STATE
 	ret
-ENDPROC(MCOUNT_NAME)
+SYM_FUNC_END(MCOUNT_NAME)
 #endif
 EXPORT_SYMBOL(MCOUNT_NAME)
--- a/arch/riscv/kernel/probes/rethook_trampoline.S
+++ b/arch/riscv/kernel/probes/rethook_trampoline.S
@@ -75,7 +75,7 @@
 	REG_L x31, PT_T6(sp)
 	.endm
 
-ENTRY(arch_rethook_trampoline)
+SYM_CODE_START(arch_rethook_trampoline)
 	addi sp, sp, -(PT_SIZE_ON_STACK)
 	save_all_base_regs
 
@@ -90,4 +90,4 @@ ENTRY(arch_rethook_trampoline)
 	addi sp, sp, PT_SIZE_ON_STACK
 
 	ret
-ENDPROC(arch_rethook_trampoline)
+SYM_CODE_END(arch_rethook_trampoline)
--- a/arch/riscv/kernel/suspend_entry.S
+++ b/arch/riscv/kernel/suspend_entry.S
@@ -16,7 +16,7 @@
 	.altmacro
 	.option norelax
 
-ENTRY(__cpu_suspend_enter)
+SYM_FUNC_START(__cpu_suspend_enter)
 	/* Save registers (except A0 and T0-T6) */
 	REG_S ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
 	REG_S sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
@@ -57,7 +57,7 @@ ENTRY(__cpu_suspend_enter)
 
 	/* Return to C code */
 	ret
-END(__cpu_suspend_enter)
+SYM_FUNC_END(__cpu_suspend_enter)
 
 SYM_TYPED_FUNC_START(__cpu_resume_enter)
 	/* Load the global pointer */
--- a/arch/riscv/kernel/vdso/flush_icache.S
+++ b/arch/riscv/kernel/vdso/flush_icache.S
@@ -8,7 +8,7 @@
 
 	.text
 /* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
-ENTRY(__vdso_flush_icache)
+SYM_FUNC_START(__vdso_flush_icache)
 	.cfi_startproc
 #ifdef CONFIG_SMP
 	li a7, __NR_riscv_flush_icache
@@ -19,4 +19,4 @@ ENTRY(__vdso_flush_icache)
 #endif
 	ret
 	.cfi_endproc
-ENDPROC(__vdso_flush_icache)
+SYM_FUNC_END(__vdso_flush_icache)
--- a/arch/riscv/kernel/vdso/getcpu.S
+++ b/arch/riscv/kernel/vdso/getcpu.S
@@ -8,11 +8,11 @@
 
 	.text
 /* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
-ENTRY(__vdso_getcpu)
+SYM_FUNC_START(__vdso_getcpu)
 	.cfi_startproc
 	/* For now, just do the syscall. */
 	li a7, __NR_getcpu
 	ecall
 	ret
 	.cfi_endproc
-ENDPROC(__vdso_getcpu)
+SYM_FUNC_END(__vdso_getcpu)
--- a/arch/riscv/kernel/vdso/rt_sigreturn.S
+++ b/arch/riscv/kernel/vdso/rt_sigreturn.S
@@ -7,10 +7,10 @@
 #include <asm/unistd.h>
 
 	.text
-ENTRY(__vdso_rt_sigreturn)
+SYM_FUNC_START(__vdso_rt_sigreturn)
 	.cfi_startproc
 	.cfi_signal_frame
 	li a7, __NR_rt_sigreturn
 	ecall
 	.cfi_endproc
-ENDPROC(__vdso_rt_sigreturn)
+SYM_FUNC_END(__vdso_rt_sigreturn)
--- a/arch/riscv/kernel/vdso/sys_hwprobe.S
+++ b/arch/riscv/kernel/vdso/sys_hwprobe.S
@@ -5,11 +5,11 @@
 #include <asm/unistd.h>
 
 	.text
-ENTRY(riscv_hwprobe)
+SYM_FUNC_START(riscv_hwprobe)
 	.cfi_startproc
 	li a7, __NR_riscv_hwprobe
 	ecall
 	ret
 
 	.cfi_endproc
-ENDPROC(riscv_hwprobe)
+SYM_FUNC_END(riscv_hwprobe)
--- a/arch/riscv/lib/memcpy.S
+++ b/arch/riscv/lib/memcpy.S
@@ -7,8 +7,7 @@
 #include <asm/asm.h>
 
 /* void *memcpy(void *, const void *, size_t) */
-ENTRY(__memcpy)
-WEAK(memcpy)
+SYM_FUNC_START(__memcpy)
 	move t6, a0 /* Preserve return value */
 
 	/* Defer to byte-oriented copy for small sizes */
@@ -105,6 +104,7 @@ WEAK(memcpy)
 	bltu a1, a3, 5b
 6:
 	ret
-END(__memcpy)
+SYM_FUNC_END(__memcpy)
+SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
 SYM_FUNC_ALIAS(__pi_memcpy, __memcpy)
 SYM_FUNC_ALIAS(__pi___memcpy, __memcpy)
--- a/arch/riscv/lib/memmove.S
+++ b/arch/riscv/lib/memmove.S
@@ -7,7 +7,6 @@
 #include <asm/asm.h>
 
 SYM_FUNC_START(__memmove)
-SYM_FUNC_START_WEAK(memmove)
 	/*
 	 * Returns
 	 * a0 - dest
@@ -312,7 +311,7 @@ SYM_FUNC_START_WEAK(memmove)
 .Lreturn_from_memmove:
 	ret
 
-SYM_FUNC_END(memmove)
 SYM_FUNC_END(__memmove)
+SYM_FUNC_ALIAS_WEAK(memmove, __memmove)
 SYM_FUNC_ALIAS(__pi_memmove, __memmove)
 SYM_FUNC_ALIAS(__pi___memmove, __memmove)
--- a/arch/riscv/lib/memset.S
+++ b/arch/riscv/lib/memset.S
@@ -8,8 +8,7 @@
 #include <asm/asm.h>
 
 /* void *memset(void *, int, size_t) */
-ENTRY(__memset)
-WEAK(memset)
+SYM_FUNC_START(__memset)
 	move t0, a0 /* Preserve return value */
 
 	/* Defer to byte-oriented fill for small sizes */
@@ -110,4 +109,5 @@ WEAK(memset)
 	bltu t0, a3, 5b
 6:
 	ret
-END(__memset)
+SYM_FUNC_END(__memset)
+SYM_FUNC_ALIAS_WEAK(memset, __memset)
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -10,8 +10,7 @@
 	_asm_extable 100b, \lbl
 	.endm
 
-ENTRY(__asm_copy_to_user)
-ENTRY(__asm_copy_from_user)
+SYM_FUNC_START(__asm_copy_to_user)
 
 	/* Enable access to user memory */
 	li t6, SR_SUM
@@ -181,13 +180,13 @@ ENTRY(__asm_copy_from_user)
 	csrc CSR_STATUS, t6
 	sub a0, t5, a0
 	ret
-ENDPROC(__asm_copy_to_user)
-ENDPROC(__asm_copy_from_user)
+SYM_FUNC_END(__asm_copy_to_user)
 EXPORT_SYMBOL(__asm_copy_to_user)
+SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
 EXPORT_SYMBOL(__asm_copy_from_user)
 
 
-ENTRY(__clear_user)
+SYM_FUNC_START(__clear_user)
 
 	/* Enable access to user memory */
 	li t6, SR_SUM
@@ -233,5 +232,5 @@ ENTRY(__clear_user)
 	csrc CSR_STATUS, t6
 	sub a0, a3, a0
 	ret
-ENDPROC(__clear_user)
+SYM_FUNC_END(__clear_user)
 EXPORT_SYMBOL(__clear_user)
--- a/arch/riscv/purgatory/entry.S
+++ b/arch/riscv/purgatory/entry.S
@@ -8,16 +8,12 @@
  *
  */
 
-.macro size, sym:req
-	.size \sym, . - \sym
-.endm
 #include <asm/asm.h>
 #include <linux/linkage.h>
 
 .text
 
-.globl purgatory_start
-purgatory_start:
+SYM_CODE_START(purgatory_start)
 
 	lla sp, .Lstack
 	mv s0, a0 /* The hartid of the current hart */
@@ -30,8 +26,7 @@ purgatory_start:
 	mv a1, s1
 	ld a2, riscv_kernel_entry
 	jr a2
-
-size purgatory_start
+SYM_CODE_END(purgatory_start)
 
 .align 4
 	.rept 256
@@ -42,9 +37,7 @@ size purgatory_start
 .data
 
 .align LGREG
-.globl riscv_kernel_entry
-riscv_kernel_entry:
-	.quad 0
-size riscv_kernel_entry
+SYM_DATA(riscv_kernel_entry, .quad 0)
 
 .end
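A note on the weak-symbol hunks above (memcpy, memset, memmove, __asm_copy_from_user): the old code declared the generic name as a weak second entry point inside the optimized routine, via WEAK() or a nested ENTRY(); the new code annotates a single function and declares the alias after it. A minimal before/after sketch (the ret bodies are placeholders):

    /* Old: weak second entry point inside __memcpy */
    ENTRY(__memcpy)
    WEAK(memcpy)
    	ret
    END(__memcpy)

    /* New: one annotated function plus an explicit weak alias */
    SYM_FUNC_START(__memcpy)
    	ret
    SYM_FUNC_END(__memcpy)
    SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)

The non-weak SYM_FUNC_ALIAS() form replaces the nested ENTRY(__asm_copy_from_user) the same way, so both user-copy symbols still resolve to a single implementation.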