mirror of
https://gitee.com/bianbu-linux/linux-6.6
synced 2025-04-24 14:07:52 -04:00
[PATCH] x86_64: prefetch the mmap_sem in the fault path
In a micro-benchmark that stresses the pagefault path, the down_read_trylock on the mmap_sem showed up quite high on the profile. Turns out this lock is bouncing between cpus quite a bit and thus is cache-cold a lot. This patch prefetches the lock (for write) as early as possible (and before some other somewhat expensive operations). With this patch, the down_read_trylock basically fell out of the top of profile. Signed-off-by: Arjan van de Ven <arjan@linux.intel.com> Signed-off-by: Andi Kleen <ak@suse.de> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
4bc32c4d5c
commit
a9ba9a3b38
1 changed file with 4 additions and 2 deletions
|
@@ -314,11 +314,13 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
siginfo_t info;
|
siginfo_t info;
|
||||||
|
|
||||||
|
tsk = current;
|
||||||
|
mm = tsk->mm;
|
||||||
|
prefetchw(&mm->mmap_sem);
|
||||||
|
|
||||||
/* get the address */
|
/* get the address */
|
||||||
__asm__("movq %%cr2,%0":"=r" (address));
|
__asm__("movq %%cr2,%0":"=r" (address));
|
||||||
|
|
||||||
tsk = current;
|
|
||||||
mm = tsk->mm;
|
|
||||||
info.si_code = SEGV_MAPERR;
|
info.si_code = SEGV_MAPERR;
|
||||||
|
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue