commit f8ddadc4db

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

There were quite a few overlapping sets of changes here.

Daniel's bug fix for off-by-ones in the new BPF branch instructions, along
with the added allowances for "data_end > ptr + x" forms, collided with the
metadata additions. Along with those three changes came verifier test cases,
which in their final form I tried to group together properly. If I had just
trimmed GIT's conflict tags as-is, this would have split up the meta tests
unnecessarily.

In the socketmap code, a set of preemption disabling changes overlapped with
the rename of bpf_compute_data_end() to bpf_compute_data_pointers().

Changes were made to the mv88e6060.c driver's set_addr method, which got
removed in net-next.

The hyperv transport socket layer had a locking change in 'net' which
overlapped with a change of socket state macro usage in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>

415 changed files with 4551 additions and 2007 deletions
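
For readers unfamiliar with the first conflict: "data_end > ptr + x" is the
pointer-comparison shape the BPF verifier accepts as a packet bounds check.
The sketch below is a hypothetical XDP program written only to show that
form; it is not code from this merge:

    #include <linux/bpf.h>
    #include <linux/if_ether.h>

    int xdp_bounds_example(struct xdp_md *ctx)
    {
        unsigned char *data     = (void *)(long)ctx->data;
        unsigned char *data_end = (void *)(long)ctx->data_end;

        /* The "data_end > ptr + x" form: only after this branch does the
         * verifier consider the first 14 bytes safely loadable. */
        if (data_end > data + sizeof(struct ethhdr)) {
            struct ethhdr *eth = (struct ethhdr *)data;
            return eth->h_proto ? XDP_PASS : XDP_DROP;
        }
        return XDP_DROP;
    }
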
@@ -14,13 +14,3 @@ Description:	Enable/disable VMA based swap readahead.
 		still used for tmpfs etc. other users. If set to
 		false, the global swap readahead algorithm will be
 		used for all swappable pages.
-
-What:		/sys/kernel/mm/swap/vma_ra_max_order
-Date:		August 2017
-Contact:	Linux memory management mailing list <linux-mm@kvack.org>
-Description:	The max readahead size in order for VMA based swap readahead
-
-		VMA based swap readahead algorithm will readahead at
-		most 1 << max_order pages for each readahead. The
-		real readahead size for each readahead will be scaled
-		according to the estimation algorithm.
@@ -352,44 +352,30 @@ Read-Copy Update (RCU)
 ----------------------
 
 .. kernel-doc:: include/linux/rcupdate.h
-   :external:
 
 .. kernel-doc:: include/linux/rcupdate_wait.h
-   :external:
 
 .. kernel-doc:: include/linux/rcutree.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/tree.c
-   :external:
 
 .. kernel-doc:: kernel/rcu/tree_plugin.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/tree_exp.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/update.c
-   :external:
 
 .. kernel-doc:: include/linux/srcu.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/srcutree.c
-   :external:
 
 .. kernel-doc:: include/linux/rculist_bl.h
-   :external:
 
 .. kernel-doc:: include/linux/rculist.h
-   :external:
 
 .. kernel-doc:: include/linux/rculist_nulls.h
-   :external:
 
 .. kernel-doc:: include/linux/rcu_sync.h
-   :external:
 
 .. kernel-doc:: kernel/rcu/sync.c
-   :external:
@@ -25,6 +25,7 @@ Below are the essential guides that every developer should read.
    submitting-patches
    coding-style
    email-clients
+   kernel-enforcement-statement
 
 Other guides to the community that are of interest to most developers are:
 
Documentation/process/kernel-enforcement-statement.rst (new file, 147 lines)

@@ -0,0 +1,147 @@
+Linux Kernel Enforcement Statement
+----------------------------------
+
+As developers of the Linux kernel, we have a keen interest in how our software
+is used and how the license for our software is enforced. Compliance with the
+reciprocal sharing obligations of GPL-2.0 is critical to the long-term
+sustainability of our software and community.
+
+Although there is a right to enforce the separate copyright interests in the
+contributions made to our community, we share an interest in ensuring that
+individual enforcement actions are conducted in a manner that benefits our
+community and do not have an unintended negative impact on the health and
+growth of our software ecosystem. In order to deter unhelpful enforcement
+actions, we agree that it is in the best interests of our development
+community to undertake the following commitment to users of the Linux kernel
+on behalf of ourselves and any successors to our copyright interests:
+
+    Notwithstanding the termination provisions of the GPL-2.0, we agree that
+    it is in the best interests of our development community to adopt the
+    following provisions of GPL-3.0 as additional permissions under our
+    license with respect to any non-defensive assertion of rights under the
+    license.
+
+        However, if you cease all violation of this License, then your license
+        from a particular copyright holder is reinstated (a) provisionally,
+        unless and until the copyright holder explicitly and finally
+        terminates your license, and (b) permanently, if the copyright holder
+        fails to notify you of the violation by some reasonable means prior to
+        60 days after the cessation.
+
+        Moreover, your license from a particular copyright holder is
+        reinstated permanently if the copyright holder notifies you of the
+        violation by some reasonable means, this is the first time you have
+        received notice of violation of this License (for any work) from that
+        copyright holder, and you cure the violation prior to 30 days after
+        your receipt of the notice.
+
+Our intent in providing these assurances is to encourage more use of the
+software. We want companies and individuals to use, modify and distribute
+this software. We want to work with users in an open and transparent way to
+eliminate any uncertainty about our expectations regarding compliance or
+enforcement that might limit adoption of our software. We view legal action
+as a last resort, to be initiated only when other community efforts have
+failed to resolve the problem.
+
+Finally, once a non-compliance issue is resolved, we hope the user will feel
+welcome to join us in our efforts on this project. Working together, we will
+be stronger.
+
+Except where noted below, we speak only for ourselves, and not for any company
+we might work for today, have in the past, or will in the future.
+
+  - Bjorn Andersson (Linaro)
+  - Andrea Arcangeli (Red Hat)
+  - Neil Armstrong
+  - Jens Axboe
+  - Pablo Neira Ayuso
+  - Khalid Aziz
+  - Ralf Baechle
+  - Felipe Balbi
+  - Arnd Bergmann
+  - Ard Biesheuvel
+  - Paolo Bonzini (Red Hat)
+  - Christian Borntraeger
+  - Mark Brown (Linaro)
+  - Paul Burton
+  - Javier Martinez Canillas
+  - Rob Clark
+  - Jonathan Corbet
+  - Vivien Didelot (Savoir-faire Linux)
+  - Hans de Goede (Red Hat)
+  - Mel Gorman (SUSE)
+  - Sven Eckelmann
+  - Alex Elder (Linaro)
+  - Fabio Estevam
+  - Larry Finger
+  - Bhumika Goyal
+  - Andy Gross
+  - Juergen Gross
+  - Shawn Guo
+  - Ulf Hansson
+  - Tejun Heo
+  - Rob Herring
+  - Masami Hiramatsu
+  - Michal Hocko
+  - Simon Horman
+  - Johan Hovold (Hovold Consulting AB)
+  - Christophe JAILLET
+  - Olof Johansson
+  - Lee Jones (Linaro)
+  - Heiner Kallweit
+  - Srinivas Kandagatla
+  - Jan Kara
+  - Shuah Khan (Samsung)
+  - David Kershner
+  - Jaegeuk Kim
+  - Namhyung Kim
+  - Colin Ian King
+  - Jeff Kirsher
+  - Greg Kroah-Hartman (Linux Foundation)
+  - Christian König
+  - Vinod Koul
+  - Krzysztof Kozlowski
+  - Viresh Kumar
+  - Aneesh Kumar K.V
+  - Julia Lawall
+  - Doug Ledford (Red Hat)
+  - Chuck Lever (Oracle)
+  - Daniel Lezcano
+  - Shaohua Li
+  - Xin Long (Red Hat)
+  - Tony Luck
+  - Mike Marshall
+  - Chris Mason
+  - Paul E. McKenney
+  - David S. Miller
+  - Ingo Molnar
+  - Kuninori Morimoto
+  - Borislav Petkov
+  - Jiri Pirko
+  - Josh Poimboeuf
+  - Sebastian Reichel (Collabora)
+  - Guenter Roeck
+  - Joerg Roedel
+  - Leon Romanovsky
+  - Steven Rostedt (VMware)
+  - Ivan Safonov
+  - Anna Schumaker
+  - Jes Sorensen
+  - K.Y. Srinivasan
+  - Heiko Stuebner
+  - Jiri Kosina (SUSE)
+  - Dmitry Torokhov
+  - Linus Torvalds
+  - Thierry Reding
+  - Rik van Riel
+  - Geert Uytterhoeven (Glider bvba)
+  - Daniel Vetter
+  - Linus Walleij
+  - Richard Weinberger
+  - Dan Williams
+  - Rafael J. Wysocki
+  - Arvind Yadav
+  - Masahiro Yamada
+  - Wei Yongjun
+  - Lv Zheng
@@ -5351,9 +5351,7 @@ M:	"J. Bruce Fields" <bfields@fieldses.org>
 L: linux-fsdevel@vger.kernel.org
 S: Maintained
 F: include/linux/fcntl.h
-F: include/linux/fs.h
 F: include/uapi/linux/fcntl.h
-F: include/uapi/linux/fs.h
 F: fs/fcntl.c
 F: fs/locks.c
 
@@ -5362,6 +5360,8 @@ M:	Alexander Viro <viro@zeniv.linux.org.uk>
 L: linux-fsdevel@vger.kernel.org
 S: Maintained
 F: fs/*
+F: include/linux/fs.h
+F: include/uapi/linux/fs.h
 
 FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M: Riku Voipio <riku.voipio@iki.fi>
@@ -7576,7 +7576,7 @@ F:	arch/mips/include/asm/kvm*
 F: arch/mips/kvm/
 
 KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
-M: Alexander Graf <agraf@suse.com>
+M: Paul Mackerras <paulus@ozlabs.org>
 L: kvm-ppc@vger.kernel.org
 W: http://www.linux-kvm.org/
 T: git git://github.com/agraf/linux-2.6.git
Makefile (8 changed lines)

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -933,7 +933,11 @@ ifdef CONFIG_STACK_VALIDATION
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
-    $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+    ifdef CONFIG_ORC_UNWINDER
+      $(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+    else
+      $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+    endif
     SKIP_STACK_VALIDATION := 1
     export SKIP_STACK_VALIDATION
   endif
@@ -131,7 +131,7 @@ endif
 KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
 KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
 
-CHECKFLAGS += -D__arm__
+CHECKFLAGS += -D__arm__ -m32
 
 #Default value
 head-y := arch/arm/kernel/head$(MMUEXT).o
@@ -23,7 +23,11 @@ ENTRY(putc)
     strb  r0, [r1]
     mov   r0, #0x03   @ SYS_WRITEC
  ARM(   svc   #0x123456 )
+#ifdef CONFIG_CPU_V7M
+ THUMB( bkpt  #0xab )
+#else
  THUMB( svc   #0xab )
+#endif
     mov   pc, lr
     .align 2
 1:  .word _GLOBAL_OFFSET_TABLE_ - .
@@ -178,7 +178,7 @@
     };
 
     i2c0: i2c@11000 {
-        compatible = "marvell,mv64xxx-i2c";
+        compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
         reg = <0x11000 0x20>;
         #address-cells = <1>;
         #size-cells = <0>;
@@ -189,7 +189,7 @@
     };
 
     i2c1: i2c@11100 {
-        compatible = "marvell,mv64xxx-i2c";
+        compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
         reg = <0x11100 0x20>;
         #address-cells = <1>;
         #size-cells = <0>;
@@ -67,8 +67,8 @@
     pinctrl-0 = <&pinctrl_macb0_default>;
     phy-mode = "rmii";
 
-    ethernet-phy@1 {
-        reg = <0x1>;
+    ethernet-phy@0 {
+        reg = <0x0>;
         interrupt-parent = <&pioA>;
         interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
         pinctrl-names = "default";
@@ -309,7 +309,7 @@
     vddana-supply = <&vdd_3v3_lp_reg>;
     vref-supply = <&vdd_3v3_lp_reg>;
     pinctrl-names = "default";
-    pinctrl-0 = <&pinctrl_adc_default>;
+    pinctrl-0 = <&pinctrl_adc_default &pinctrl_adtrg_default>;
     status = "okay";
 };
 
@@ -340,6 +340,20 @@
     bias-disable;
 };
 
+/*
+ * The ADTRG pin can work on any edge type.
+ * In here it's being pulled up, so need to
+ * connect it to ground to get an edge e.g.
+ * Trigger can be configured on falling, rise
+ * or any edge, and the pull-up can be changed
+ * to pull-down or left floating according to
+ * needs.
+ */
+pinctrl_adtrg_default: adtrg_default {
+    pinmux = <PIN_PD31__ADTRG>;
+    bias-pull-up;
+};
+
 pinctrl_charger_chglev: charger_chglev {
     pinmux = <PIN_PA12__GPIO>;
     bias-disable;
@@ -18,12 +18,9 @@
     compatible = "raspberrypi,model-zero-w", "brcm,bcm2835";
     model = "Raspberry Pi Zero W";
 
-    /* Needed by firmware to properly init UARTs */
-    aliases {
-        uart0 = "/soc/serial@7e201000";
-        uart1 = "/soc/serial@7e215040";
-        serial0 = "/soc/serial@7e201000";
-        serial1 = "/soc/serial@7e215040";
+    chosen {
+        /* 8250 auxiliary UART instead of pl011 */
+        stdout-path = "serial1:115200n8";
     };
 
     leds {
@@ -8,6 +8,11 @@
     compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
     model = "Raspberry Pi 3 Model B";
 
+    chosen {
+        /* 8250 auxiliary UART instead of pl011 */
+        stdout-path = "serial1:115200n8";
+    };
+
     memory {
         reg = <0 0x40000000>;
     };
@@ -20,8 +20,13 @@
     #address-cells = <1>;
     #size-cells = <1>;
 
+    aliases {
+        serial0 = &uart0;
+        serial1 = &uart1;
+    };
+
     chosen {
-        bootargs = "earlyprintk console=ttyAMA0";
+        stdout-path = "serial0:115200n8";
     };
 
     thermal-zones {
@@ -145,11 +145,12 @@
     };
 
     watchdog@41000000 {
-        compatible = "cortina,gemini-watchdog";
+        compatible = "cortina,gemini-watchdog", "faraday,ftwdt010";
         reg = <0x41000000 0x1000>;
         interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
         resets = <&syscon GEMINI_RESET_WDOG>;
         clocks = <&syscon GEMINI_CLK_APB>;
+        clock-names = "PCLK";
     };
 
     uart0: serial@42000000 {
@@ -144,10 +144,10 @@
     interrupt-names = "msi";
     #interrupt-cells = <1>;
     interrupt-map-mask = <0 0 0 0x7>;
-    interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
-                    <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
-                    <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-                    <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+    interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                    <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                    <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                    <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
     clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
              <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
             <&clks IMX7D_PCIE_PHY_ROOT_CLK>;
@@ -87,9 +87,10 @@
     };
 
     watchdog: watchdog@98500000 {
-        compatible = "moxa,moxart-watchdog";
+        compatible = "moxa,moxart-watchdog", "faraday,ftwdt010";
         reg = <0x98500000 0x10>;
         clocks = <&clk_apb>;
+        clock-names = "PCLK";
     };
 
     sdhci: sdhci@98e00000 {
@@ -1430,6 +1430,7 @@
     atmel,min-sample-rate-hz = <200000>;
     atmel,max-sample-rate-hz = <20000000>;
     atmel,startup-time-ms = <4>;
+    atmel,trigger-edge-type = <IRQ_TYPE_EDGE_RISING>;
     status = "disabled";
 };
 
@@ -311,8 +311,8 @@
     #size-cells = <0>;
     reg = <0>;
 
-    tcon1_in_drc1: endpoint@0 {
-        reg = <0>;
+    tcon1_in_drc1: endpoint@1 {
+        reg = <1>;
         remote-endpoint = <&drc1_out_tcon1>;
     };
 };
@@ -1012,8 +1012,8 @@
     #size-cells = <0>;
     reg = <1>;
 
-    be1_out_drc1: endpoint@0 {
-        reg = <0>;
+    be1_out_drc1: endpoint@1 {
+        reg = <1>;
         remote-endpoint = <&drc1_in_be1>;
     };
 };
@@ -1042,8 +1042,8 @@
     #size-cells = <0>;
     reg = <0>;
 
-    drc1_in_be1: endpoint@0 {
-        reg = <0>;
+    drc1_in_be1: endpoint@1 {
+        reg = <1>;
         remote-endpoint = <&be1_out_drc1>;
     };
 };
@@ -1053,8 +1053,8 @@
     #size-cells = <0>;
     reg = <1>;
 
-    drc1_out_tcon1: endpoint@0 {
-        reg = <0>;
+    drc1_out_tcon1: endpoint@1 {
+        reg = <1>;
         remote-endpoint = <&tcon1_in_drc1>;
     };
 };
@@ -115,7 +115,11 @@ ENTRY(printascii)
     mov   r1, r0
     mov   r0, #0x04   @ SYS_WRITE0
  ARM(   svc   #0x123456 )
+#ifdef CONFIG_CPU_V7M
+ THUMB( bkpt  #0xab )
+#else
  THUMB( svc   #0xab )
+#endif
     ret   lr
 ENDPROC(printascii)
 
@@ -124,7 +128,11 @@ ENTRY(printch)
     strb  r0, [r1]
     mov   r0, #0x03   @ SYS_WRITEC
  ARM(   svc   #0x123456 )
+#ifdef CONFIG_CPU_V7M
+ THUMB( bkpt  #0xab )
+#else
  THUMB( svc   #0xab )
+#endif
     ret   lr
 ENDPROC(printch)
 
@@ -32,6 +32,7 @@
 #include <asm/mach/arch.h>
 
 #include "db8500-regs.h"
+#include "pm_domains.h"
 
 static int __init ux500_l2x0_unlock(void)
 {
@@ -157,6 +158,9 @@ static const struct of_device_id u8500_local_bus_nodes[] = {
 
 static void __init u8500_init_machine(void)
 {
+    /* Initialize ux500 power domains */
+    ux500_pm_domains_init();
+
     /* automatically probe child nodes of dbx5x0 devices */
     if (of_machine_is_compatible("st-ericsson,u8540"))
         of_platform_populate(NULL, u8500_local_bus_nodes,
@@ -19,7 +19,6 @@
 #include <linux/of_address.h>
 
 #include "db8500-regs.h"
-#include "pm_domains.h"
 
 /* ARM WFI Standby signal register */
 #define PRCM_ARM_WFI_STANDBY (prcmu_base + 0x130)
@@ -203,7 +202,4 @@ void __init ux500_pm_init(u32 phy_base, u32 size)
 
     /* Set up ux500 suspend callbacks. */
     suspend_set_ops(UX500_SUSPEND_OPS);
-
-    /* Initialize ux500 power domains */
-    ux500_pm_domains_init();
 }
@@ -344,6 +344,11 @@ void __init arm_mm_memblock_reserve(void)
      * reserved here.
      */
 #endif
+    /*
+     * In any case, always ensure address 0 is never used as many things
+     * get very confused if 0 is returned as a legitimate address.
+     */
+    memblock_reserve(0, 1);
 }
 
 void __init adjust_lowmem_bounds(void)
@@ -61,13 +61,6 @@
     chosen {
         stdout-path = "serial0:115200n8";
     };
-
-    reg_vcc3v3: vcc3v3 {
-        compatible = "regulator-fixed";
-        regulator-name = "vcc3v3";
-        regulator-min-microvolt = <3300000>;
-        regulator-max-microvolt = <3300000>;
-    };
 };
 
 &ehci0 {
@@ -91,7 +84,7 @@
 &mmc0 {
     pinctrl-names = "default";
     pinctrl-0 = <&mmc0_pins>;
-    vmmc-supply = <&reg_vcc3v3>;
+    vmmc-supply = <&reg_dcdc1>;
     cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
     cd-inverted;
     disable-wp;
@@ -336,7 +336,7 @@
         /* non-prefetchable memory */
         0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>;
     interrupt-map-mask = <0 0 0 0>;
-    interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
+    interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
     interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
     num-lanes = <1>;
     clocks = <&cpm_clk 1 13>;
@@ -362,7 +362,7 @@
         /* non-prefetchable memory */
         0x82000000 0 0xf7000000 0 0xf7000000 0 0xf00000>;
     interrupt-map-mask = <0 0 0 0>;
-    interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
+    interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
     interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
 
     num-lanes = <1>;
@@ -389,7 +389,7 @@
         /* non-prefetchable memory */
         0x82000000 0 0xf8000000 0 0xf8000000 0 0xf00000>;
     interrupt-map-mask = <0 0 0 0>;
-    interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
+    interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
     interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
 
     num-lanes = <1>;
@@ -335,7 +335,7 @@
         /* non-prefetchable memory */
         0x82000000 0 0xfa000000 0 0xfa000000 0 0xf00000>;
     interrupt-map-mask = <0 0 0 0>;
-    interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
+    interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
     interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
     num-lanes = <1>;
     clocks = <&cps_clk 1 13>;
@@ -361,7 +361,7 @@
         /* non-prefetchable memory */
         0x82000000 0 0xfb000000 0 0xfb000000 0 0xf00000>;
     interrupt-map-mask = <0 0 0 0>;
-    interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
+    interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
     interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
 
     num-lanes = <1>;
@@ -388,7 +388,7 @@
         /* non-prefetchable memory */
         0x82000000 0 0xfc000000 0 0xfc000000 0 0xf00000>;
     interrupt-map-mask = <0 0 0 0>;
-    interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
+    interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
     interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
 
     num-lanes = <1>;
@@ -62,6 +62,7 @@
     brightness-levels = <256 128 64 16 8 4 0>;
     default-brightness-level = <6>;
 
+    power-supply = <&reg_12v>;
     enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
 };
 
@@ -83,6 +84,15 @@
     regulator-always-on;
 };
 
+reg_12v: regulator2 {
+    compatible = "regulator-fixed";
+    regulator-name = "fixed-12V";
+    regulator-min-microvolt = <12000000>;
+    regulator-max-microvolt = <12000000>;
+    regulator-boot-on;
+    regulator-always-on;
+};
+
 rsnd_ak4613: sound {
     compatible = "simple-audio-card";
 
@@ -582,7 +582,7 @@
 vop_mmu: iommu@ff373f00 {
     compatible = "rockchip,iommu";
     reg = <0x0 0xff373f00 0x0 0x100>;
-    interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>;
+    interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
     interrupt-names = "vop_mmu";
     #iommu-cells = <0>;
     status = "disabled";
@@ -740,7 +740,7 @@
 iep_mmu: iommu@ff900800 {
     compatible = "rockchip,iommu";
     reg = <0x0 0xff900800 0x0 0x100>;
-    interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>;
+    interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
     interrupt-names = "iep_mmu";
     #iommu-cells = <0>;
     status = "disabled";
@@ -371,10 +371,10 @@
     regulator-always-on;
     regulator-boot-on;
     regulator-min-microvolt = <1800000>;
-    regulator-max-microvolt = <3300000>;
+    regulator-max-microvolt = <3000000>;
     regulator-state-mem {
         regulator-on-in-suspend;
-        regulator-suspend-microvolt = <3300000>;
+        regulator-suspend-microvolt = <3000000>;
     };
 };
 
@@ -325,12 +325,12 @@
 vcc_sd: LDO_REG4 {
     regulator-name = "vcc_sd";
     regulator-min-microvolt = <1800000>;
-    regulator-max-microvolt = <3300000>;
+    regulator-max-microvolt = <3000000>;
     regulator-always-on;
     regulator-boot-on;
     regulator-state-mem {
         regulator-on-in-suspend;
-        regulator-suspend-microvolt = <3300000>;
+        regulator-suspend-microvolt = <3000000>;
     };
 };
 
@@ -315,10 +315,10 @@
     regulator-always-on;
     regulator-boot-on;
     regulator-min-microvolt = <1800000>;
-    regulator-max-microvolt = <3300000>;
+    regulator-max-microvolt = <3000000>;
     regulator-state-mem {
         regulator-on-in-suspend;
-        regulator-suspend-microvolt = <3300000>;
+        regulator-suspend-microvolt = <3000000>;
     };
 };
 
@@ -155,14 +155,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
         return __cmpxchg_small(ptr, old, new, size);
 
     case 4:
-        return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new);
+        return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr,
+                             (u32)old, new);
 
     case 8:
         /* lld/scd are only available for MIPS64 */
         if (!IS_ENABLED(CONFIG_64BIT))
             return __cmpxchg_called_with_bad_pointer();
 
-        return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new);
+        return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr,
+                             (u64)old, new);
 
     default:
         return __cmpxchg_called_with_bad_pointer();
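
Why the cast in the hunk above matters: on a 64-bit MIPS kernel, ll sign-extends
the loaded 32-bit word into a 64-bit register, while a caller-supplied unsigned
long 'old' may hold the same word zero-extended, so the compare never succeeds
for values with bit 31 set. A user-space sketch of that mismatch (illustration
only, not the kernel macro):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t mem = 0x80000000u;        /* word actually in memory */
        uint64_t old = mem;                /* zero-extended 'old' argument */
        int64_t loaded = (int32_t)mem;     /* what ll leaves in a register */

        printf("64-bit compare: %s\n",     /* prints "mismatch" */
               (uint64_t)loaded == old ? "match" : "mismatch");
        printf("u32 compare:    %s\n",     /* prints "match" */
               (uint32_t)loaded == (uint32_t)old ? "match" : "mismatch");
        return 0;
    }
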
@@ -183,18 +183,20 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
 }
 
 static struct plat_stmmacenet_data ls1x_eth0_pdata = {
     .bus_id           = 0,
     .phy_addr         = -1,
 #if defined(CONFIG_LOONGSON1_LS1B)
     .interface        = PHY_INTERFACE_MODE_MII,
 #elif defined(CONFIG_LOONGSON1_LS1C)
     .interface        = PHY_INTERFACE_MODE_RMII,
 #endif
     .mdio_bus_data    = &ls1x_mdio_bus_data,
     .dma_cfg          = &ls1x_eth_dma_cfg,
     .has_gmac         = 1,
     .tx_coe           = 1,
-    .init             = ls1x_eth_mux_init,
+    .rx_queues_to_use = 1,
+    .tx_queues_to_use = 1,
+    .init             = ls1x_eth_mux_init,
 };
 
 static struct resource ls1x_eth0_resources[] = {
@@ -222,14 +224,16 @@ struct platform_device ls1x_eth0_pdev = {
 
 #ifdef CONFIG_LOONGSON1_LS1B
 static struct plat_stmmacenet_data ls1x_eth1_pdata = {
     .bus_id           = 1,
     .phy_addr         = -1,
     .interface        = PHY_INTERFACE_MODE_MII,
     .mdio_bus_data    = &ls1x_mdio_bus_data,
     .dma_cfg          = &ls1x_eth_dma_cfg,
     .has_gmac         = 1,
     .tx_coe           = 1,
-    .init             = ls1x_eth_mux_init,
+    .rx_queues_to_use = 1,
+    .tx_queues_to_use = 1,
+    .init             = ls1x_eth_mux_init,
 };
 
 static struct resource ls1x_eth1_resources[] = {
@@ -2558,7 +2558,6 @@ dcopuop:
             break;
         default:
             /* Reserved R6 ops */
-            pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
             return SIGILL;
         }
     }
@@ -2719,7 +2718,6 @@ dcopuop:
             break;
         default:
             /* Reserved R6 ops */
-            pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
             return SIGILL;
         }
     }
@@ -667,7 +667,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 {
     int src, dst, r, td, ts, mem_off, b_off;
     bool need_swap, did_move, cmp_eq;
-    unsigned int target;
+    unsigned int target = 0;
     u64 t64;
     s64 t64s;
     int bpf_op = BPF_OP(insn->code);
@@ -30,8 +30,6 @@ cfg="$4"
 boards_origin="$5"
 shift 5
 
-cd "${srctree}"
-
 # Only print Skipping... lines if the user explicitly specified BOARDS=. In the
 # general case it only serves to obscure the useful output about what actually
 # was included.
@@ -48,7 +46,7 @@ environment*)
 esac
 
 for board in $@; do
-    board_cfg="arch/mips/configs/generic/board-${board}.config"
+    board_cfg="${srctree}/arch/mips/configs/generic/board-${board}.config"
     if [ ! -f "${board_cfg}" ]; then
         echo "WARNING: Board config '${board_cfg}' not found"
         continue
@@ -84,7 +82,7 @@ for board in $@; do
     done || continue
 
     # Merge this board config fragment into our final config file
-    ./scripts/kconfig/merge_config.sh \
+    ${srctree}/scripts/kconfig/merge_config.sh \
         -m -O ${objtree} ${cfg} ${board_cfg} \
         | grep -Ev '^(#|Using)'
 done
@@ -35,12 +35,12 @@ EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(__xchg8);
 EXPORT_SYMBOL(__xchg32);
 EXPORT_SYMBOL(__cmpxchg_u32);
+EXPORT_SYMBOL(__cmpxchg_u64);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(__atomic_hash);
 #endif
 #ifdef CONFIG_64BIT
 EXPORT_SYMBOL(__xchg64);
-EXPORT_SYMBOL(__cmpxchg_u64);
 #endif
 
 #include <linux/uaccess.h>
@@ -742,7 +742,7 @@ lws_compare_and_swap_2:
 10: ldd 0(%r25), %r25
 11: ldd 0(%r24), %r24
 #else
-    /* Load new value into r22/r23 - high/low */
+    /* Load old value into r22/r23 - high/low */
 10: ldw 0(%r25), %r22
 11: ldw 4(%r25), %r23
     /* Load new value into fr4 for atomic store later */
@@ -834,11 +834,11 @@ cas2_action:
     copy %r0, %r28
 #else
     /* Compare first word */
-19: ldw,ma 0(%r26), %r29
+19: ldw 0(%r26), %r29
     sub,= %r29, %r22, %r0
     b,n cas2_end
     /* Compare second word */
-20: ldw,ma 4(%r26), %r29
+20: ldw 4(%r26), %r29
     sub,= %r29, %r23, %r0
     b,n cas2_end
     /* Perform the store */
@@ -253,7 +253,10 @@ static int __init init_cr16_clocksource(void)
     cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
 
     for_each_online_cpu(cpu) {
-        if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)
+        if (cpu == 0)
+            continue;
+        if ((cpu0_loc != 0) &&
+            (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
             continue;
 
         clocksource_cr16.name = "cr16_unstable";
@@ -181,34 +181,25 @@ _GLOBAL(ftrace_stub)
  * - we have no stack frame and can not allocate one
  * - LR points back to the original caller (in A)
  * - CTR holds the new NIP in C
- * - r0 & r12 are free
- *
- * r0 can't be used as the base register for a DS-form load or store, so
- * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+ * - r0, r11 & r12 are free
  */
 livepatch_handler:
     CURRENT_THREAD_INFO(r12, r1)
 
-    /* Save stack pointer into r0 */
-    mr   r0, r1
-
     /* Allocate 3 x 8 bytes */
-    ld   r1, TI_livepatch_sp(r12)
-    addi r1, r1, 24
-    std  r1, TI_livepatch_sp(r12)
+    ld   r11, TI_livepatch_sp(r12)
+    addi r11, r11, 24
+    std  r11, TI_livepatch_sp(r12)
 
     /* Save toc & real LR on livepatch stack */
-    std  r2, -24(r1)
+    std  r2, -24(r11)
     mflr r12
-    std  r12, -16(r1)
+    std  r12, -16(r11)
 
     /* Store stack end marker */
     lis  r12, STACK_END_MAGIC@h
     ori  r12, r12, STACK_END_MAGIC@l
-    std  r12, -8(r1)
-
-    /* Restore real stack pointer */
-    mr   r1, r0
+    std  r12, -8(r11)
 
     /* Put ctr in r12 for global entry and branch there */
     mfctr r12
@@ -216,36 +207,30 @@ livepatch_handler:
 
     /*
      * Now we are returning from the patched function to the original
-     * caller A. We are free to use r0 and r12, and we can use r2 until we
+     * caller A. We are free to use r11, r12 and we can use r2 until we
      * restore it.
      */
 
     CURRENT_THREAD_INFO(r12, r1)
 
-    /* Save stack pointer into r0 */
-    mr   r0, r1
-
-    ld   r1, TI_livepatch_sp(r12)
+    ld   r11, TI_livepatch_sp(r12)
 
     /* Check stack marker hasn't been trashed */
     lis  r2, STACK_END_MAGIC@h
     ori  r2, r2, STACK_END_MAGIC@l
-    ld   r12, -8(r1)
+    ld   r12, -8(r11)
 1:  tdne r12, r2
     EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
 
     /* Restore LR & toc from livepatch stack */
-    ld   r12, -16(r1)
+    ld   r12, -16(r11)
     mtlr r12
-    ld   r2, -24(r1)
+    ld   r2, -24(r11)
 
     /* Pop livepatch stack frame */
-    CURRENT_THREAD_INFO(r12, r0)
-    subi r1, r1, 24
-    std  r1, TI_livepatch_sp(r12)
+    CURRENT_THREAD_INFO(r12, r1)
+    subi r11, r11, 24
+    std  r11, TI_livepatch_sp(r12)
 
-    /* Restore real stack pointer */
-    mr   r1, r0
-
     /* Return to original caller of live patched function */
     blr
@@ -1684,11 +1684,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
      * Logical instructions
      */
     case 26:    /* cntlzw */
-        op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
+        val = (unsigned int) regs->gpr[rd];
+        op->val = ( val ? __builtin_clz(val) : 32 );
         goto logical_done;
 #ifdef __powerpc64__
     case 58:    /* cntlzd */
-        op->val = __builtin_clzl(regs->gpr[rd]);
+        val = regs->gpr[rd];
+        op->val = ( val ? __builtin_clzl(val) : 64 );
         goto logical_done;
 #endif
     case 28:    /* and */
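
The guard added above exists because __builtin_clz() has undefined behaviour
for a zero argument in C, whereas the Power cntlzw/cntlzd instructions are
architecturally defined to return the operand width (32 or 64) for a zero
input. A minimal stand-alone check of the emulation rule (a sketch, not the
kernel code path):

    #include <assert.h>

    static unsigned int emulate_cntlzw(unsigned int val)
    {
        return val ? __builtin_clz(val) : 32;   /* cntlzw(0) == 32 */
    }

    int main(void)
    {
        assert(emulate_cntlzw(0) == 32);
        assert(emulate_cntlzw(1) == 31);
        assert(emulate_cntlzw(0x80000000u) == 0);
        return 0;
    }
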
@@ -1438,7 +1438,6 @@ out:
 
 int arch_update_cpu_topology(void)
 {
-    lockdep_assert_cpus_held();
     return numa_update_cpu_topology(true);
 }
 
@@ -399,6 +399,20 @@ static void nest_imc_counters_release(struct perf_event *event)
 
     /* Take the mutex lock for this node and then decrement the reference count */
     mutex_lock(&ref->lock);
+    if (ref->refc == 0) {
+        /*
+         * The scenario where this is true is, when perf session is
+         * started, followed by offlining of all cpus in a given node.
+         *
+         * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
+         * function set the ref->count to zero, if the cpu which is
+         * about to offline is the last cpu in a given node and make
+         * an OPAL call to disable the engine in that node.
+         *
+         */
+        mutex_unlock(&ref->lock);
+        return;
+    }
     ref->refc--;
     if (ref->refc == 0) {
         rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
@@ -523,8 +537,8 @@ static int core_imc_mem_init(int cpu, int size)
 
     /* We need only vbase for core counters */
     mem_info->vbase = page_address(alloc_pages_node(phys_id,
-                GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-                get_order(size)));
+                GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+                __GFP_NOWARN, get_order(size)));
     if (!mem_info->vbase)
         return -ENOMEM;
 
@@ -646,6 +660,20 @@ static void core_imc_counters_release(struct perf_event *event)
         return;
 
     mutex_lock(&ref->lock);
+    if (ref->refc == 0) {
+        /*
+         * The scenario where this is true is, when perf session is
+         * started, followed by offlining of all cpus in a given core.
+         *
+         * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
+         * function set the ref->count to zero, if the cpu which is
+         * about to offline is the last cpu in a given core and make
+         * an OPAL call to disable the engine in that core.
+         *
+         */
+        mutex_unlock(&ref->lock);
+        return;
+    }
     ref->refc--;
     if (ref->refc == 0) {
         rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
@@ -763,8 +791,8 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
      * free the memory in cpu offline path.
      */
     local_mem = page_address(alloc_pages_node(phys_id,
-            GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-            get_order(size)));
+            GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+            __GFP_NOWARN, get_order(size)));
     if (!local_mem)
         return -ENOMEM;
 
@@ -1148,7 +1176,8 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
     }
 
     /* Only free the attr_groups which are dynamically allocated */
-    kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
+    if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
+        kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
     kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
     kfree(pmu_ptr);
     return;
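
Both imc-pmu release hunks add the same guard: if the CPU-hotplug offline path
has already dropped the reference count to zero (and issued the OPAL stop), the
release callback must not decrement again or stop the engine twice. The shape
of that pattern as a self-contained sketch, with stand-in names (imc_ref and
disable_engine are illustrative, not the kernel symbols):

    #include <pthread.h>

    struct imc_ref {
        pthread_mutex_t lock;
        int refc;
    };

    static void disable_engine(struct imc_ref *ref)
    {
        (void)ref;      /* stand-in for the OPAL counters-stop call */
    }

    static void counters_release(struct imc_ref *ref)
    {
        pthread_mutex_lock(&ref->lock);
        if (ref->refc == 0) {
            /* offline path already disabled the engine */
            pthread_mutex_unlock(&ref->lock);
            return;
        }
        if (--ref->refc == 0)
            disable_engine(ref);
        pthread_mutex_unlock(&ref->lock);
    }
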
@@ -27,6 +27,7 @@ CONFIG_NET=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_RAM=y
 # CONFIG_BLK_DEV_XPRAM is not set
 # CONFIG_DCSSBLK is not set
 # CONFIG_DASD is not set
@@ -59,6 +60,7 @@ CONFIG_CONFIGFS_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
@@ -293,7 +293,10 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
     lc->lpp = LPP_MAGIC;
     lc->current_pid = tsk->pid;
     lc->user_timer = tsk->thread.user_timer;
+    lc->guest_timer = tsk->thread.guest_timer;
     lc->system_timer = tsk->thread.system_timer;
+    lc->hardirq_timer = tsk->thread.hardirq_timer;
+    lc->softirq_timer = tsk->thread.softirq_timer;
     lc->steal_timer = 0;
 }
 
@@ -176,7 +176,7 @@
 /*
  * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
  * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
- * is just setting the LSB, which makes it an invalid stack address and is also
+ * is just clearing the MSB, which makes it an invalid stack address and is also
  * a signal to the unwinder that it's a pt_regs pointer in disguise.
  *
  * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
@@ -185,7 +185,7 @@
 .macro ENCODE_FRAME_POINTER
 #ifdef CONFIG_FRAME_POINTER
     mov %esp, %ebp
-    orl $0x1, %ebp
+    andl $0x7fffffff, %ebp
 #endif
 .endm
 
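
The idea behind the new encoding, assuming (as on common 32-bit layouts) that
kernel stack addresses have bit 31 set: clearing the MSB turns the pt_regs
pointer into a value that cannot be a stack address, and the unwinder can both
detect and reverse the transformation. A sketch of that convention
(illustration only; the real unwinder code differs):

    #include <stdint.h>
    #include <stdbool.h>

    static uint32_t encode_fp(uint32_t regs_addr)
    {
        return regs_addr & 0x7fffffff;  /* clear MSB: invalid stack address */
    }

    static bool fp_is_pt_regs(uint32_t fp)
    {
        return (fp & 0x80000000) == 0;  /* MSB clear marks disguised pt_regs */
    }

    static uint32_t decode_fp(uint32_t fp)
    {
        return fp | 0x80000000;         /* restore the original address */
    }
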
@@ -822,7 +822,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
         pmus[i].type  = type;
         pmus[i].boxes = kzalloc(size, GFP_KERNEL);
         if (!pmus[i].boxes)
-            return -ENOMEM;
+            goto err;
     }
 
     type->pmus = pmus;
@@ -836,7 +836,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
         attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
                     sizeof(*attr_group), GFP_KERNEL);
         if (!attr_group)
-            return -ENOMEM;
+            goto err;
 
         attrs = (struct attribute **)(attr_group + 1);
         attr_group->name = "events";
@@ -849,7 +849,15 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
     }
 
     type->pmu_group = &uncore_pmu_attr_group;
+
     return 0;
+
+err:
+    for (i = 0; i < type->num_boxes; i++)
+        kfree(pmus[i].boxes);
+    kfree(pmus);
+
+    return -ENOMEM;
 }
 
 static int __init
@@ -85,6 +85,8 @@ EXPORT_SYMBOL_GPL(hyperv_cs);
 u32 *hv_vp_index;
 EXPORT_SYMBOL_GPL(hv_vp_index);
 
+u32 hv_max_vp_index;
+
 static int hv_cpu_init(unsigned int cpu)
 {
     u64 msr_vp_index;
@@ -93,6 +95,9 @@ static int hv_cpu_init(unsigned int cpu)
 
     hv_vp_index[smp_processor_id()] = msr_vp_index;
 
+    if (msr_vp_index > hv_max_vp_index)
+        hv_max_vp_index = msr_vp_index;
+
     return 0;
 }
 
@@ -36,9 +36,9 @@ struct hv_flush_pcpu_ex {
 /* Each gva in gva_list encodes up to 4096 pages to flush */
 #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
 
-static struct hv_flush_pcpu __percpu *pcpu_flush;
+static struct hv_flush_pcpu __percpu **pcpu_flush;
 
-static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex;
+static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;
 
 /*
  * Fills in gva_list starting from offset. Returns the number of items added.
@@ -76,6 +76,18 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
 {
 	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
 
+	/* valid_bank_mask can represent up to 64 banks */
+	if (hv_max_vp_index / 64 >= 64)
+		return 0;
+
+	/*
+	 * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex
+	 * structs are not cleared between calls, we risk flushing unneeded
+	 * vCPUs otherwise.
+	 */
+	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
+		flush->hv_vp_set.bank_contents[vcpu_bank] = 0;
+
 	/*
 	 * Some banks may end up being empty but this is acceptable.
 	 */
@@ -83,11 +95,6 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
 		vcpu = hv_cpu_number_to_vp_number(cpu);
 		vcpu_bank = vcpu / 64;
 		vcpu_offset = vcpu % 64;
 
-		/* valid_bank_mask can represent up to 64 banks */
-		if (vcpu_bank >= 64)
-			return 0;
-
 		__set_bit(vcpu_offset, (unsigned long *)
 			  &flush->hv_vp_set.bank_contents[vcpu_bank]);
 		if (vcpu_bank >= nr_bank)
@@ -102,6 +109,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 				    const struct flush_tlb_info *info)
 {
 	int cpu, vcpu, gva_n, max_gvas;
+	struct hv_flush_pcpu **flush_pcpu;
 	struct hv_flush_pcpu *flush;
 	u64 status = U64_MAX;
 	unsigned long flags;
@@ -116,7 +124,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 
 	local_irq_save(flags);
 
-	flush = this_cpu_ptr(pcpu_flush);
+	flush_pcpu = this_cpu_ptr(pcpu_flush);
+
+	if (unlikely(!*flush_pcpu))
+		*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+	flush = *flush_pcpu;
+
+	if (unlikely(!flush)) {
+		local_irq_restore(flags);
+		goto do_native;
+	}
 
 	if (info->mm) {
 		flush->address_space = virt_to_phys(info->mm->pgd);
@@ -173,6 +191,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 				       const struct flush_tlb_info *info)
 {
 	int nr_bank = 0, max_gvas, gva_n;
+	struct hv_flush_pcpu_ex **flush_pcpu;
 	struct hv_flush_pcpu_ex *flush;
 	u64 status = U64_MAX;
 	unsigned long flags;
@@ -187,7 +206,17 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 
 	local_irq_save(flags);
 
-	flush = this_cpu_ptr(pcpu_flush_ex);
+	flush_pcpu = this_cpu_ptr(pcpu_flush_ex);
+
+	if (unlikely(!*flush_pcpu))
+		*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+	flush = *flush_pcpu;
+
+	if (unlikely(!flush)) {
+		local_irq_restore(flags);
+		goto do_native;
+	}
 
 	if (info->mm) {
 		flush->address_space = virt_to_phys(info->mm->pgd);
@@ -222,18 +251,18 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
 		status = hv_do_rep_hypercall(
 			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
-			0, nr_bank + 2, flush, NULL);
+			0, nr_bank, flush, NULL);
 	} else if (info->end &&
 		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
 		status = hv_do_rep_hypercall(
 			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
-			0, nr_bank + 2, flush, NULL);
+			0, nr_bank, flush, NULL);
 	} else {
 		gva_n = fill_gva_list(flush->gva_list, nr_bank,
 				      info->start, info->end);
 		status = hv_do_rep_hypercall(
 			HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
-			gva_n, nr_bank + 2, flush, NULL);
+			gva_n, nr_bank, flush, NULL);
 	}
 
 	local_irq_restore(flags);
@@ -266,7 +295,7 @@ void hyper_alloc_mmu(void)
 		return;
 
 	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
-		pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+		pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
 	else
-		pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+		pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
 }

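Editor's note: the hunks above hoist the `valid_bank_mask` check out of the loop and pre-clear every bank up to `hv_max_vp_index / 64`, because the per-cpu flush structure is reused between calls. A minimal standalone sketch of that bank arithmetic (plain C with illustrative names, not the Hyper-V API):

/*
 * Sketch of the bank math in cpumask_to_vp_set(): each 64-bit "bank"
 * covers 64 virtual processors, so VP n lands in bank n / 64 at bit
 * n % 64.  Banks are zeroed up to the highest possible bank first,
 * since the backing structure is reused across flushes.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_BANKS 64

static uint64_t bank_contents[MAX_BANKS];

static int vp_set_from_list(const int *vps, int n, int max_vp_index)
{
	int i, nr_bank = 1;

	if (max_vp_index / 64 >= MAX_BANKS)
		return 0;	/* set cannot be represented */

	for (i = 0; i <= max_vp_index / 64; i++)
		bank_contents[i] = 0;

	for (i = 0; i < n; i++) {
		int bank = vps[i] / 64, bit = vps[i] % 64;

		bank_contents[bank] |= UINT64_C(1) << bit;
		if (bank >= nr_bank)
			nr_bank = bank + 1;
	}
	return nr_bank;
}

int main(void)
{
	int vps[] = { 1, 65, 130 };
	int nr = vp_set_from_list(vps, 3, 130);

	printf("nr_bank=%d bank0=%#llx bank1=%#llx bank2=%#llx\n", nr,
	       (unsigned long long)bank_contents[0],
	       (unsigned long long)bank_contents[1],
	       (unsigned long long)bank_contents[2]);
	return 0;
}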
@@ -62,8 +62,10 @@
 #define new_len2		145f-144f
 
 /*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
  * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ *
+ * The additional "-" is needed because gas uses a "true" value of -1.
  */
 #define alt_max_short(a, b)	((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))

@@ -103,12 +103,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
 	alt_end_marker ":\n"
 
 /*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
  * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
  *
- * The additional "-" is needed because gas works with s32s.
+ * The additional "-" is needed because gas uses a "true" value of -1.
  */
-#define alt_max_short(a, b)	"((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
+#define alt_max_short(a, b)	"((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
 
 /*
  * Pad the second replacement alternative with additional NOPs if it is

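Editor's note: the two hunks above replace `(a) - (b)` with `(a) < (b)` inside `alt_max_short()`. The identity only computes a max when the inner term is a 0/-1 mask: gas comparisons already yield -1 for "true" (hence the extra `-` noted in the comment), while a subtraction does not produce a mask at all. A small user-space sketch of the corrected identity, using C's 0/1 comparison result with a single negation:

/*
 * Branchless max: a ^ ((a ^ b) & -(a < b)).  When a < b the mask is
 * all ones and the XOR chain yields b; otherwise the mask is zero and
 * the expression collapses back to a.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int alt_max_short(unsigned int a, unsigned int b)
{
	return a ^ ((a ^ b) & -(unsigned int)(a < b));
}

int main(void)
{
	assert(alt_max_short(3, 7) == 7);
	assert(alt_max_short(7, 3) == 7);
	assert(alt_max_short(5, 5) == 5);
	printf("max(3, 7) = %u\n", alt_max_short(3, 7));
	return 0;
}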
@@ -187,7 +187,6 @@ struct mca_msr_regs {
 
 extern struct mce_vendor_flags mce_flags;
 
-extern struct mca_config mca_cfg;
 extern struct mca_msr_regs msr_ops;
 
 enum mce_notifier_prios {

@@ -126,13 +126,7 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
 	DEBUG_LOCKS_WARN_ON(preemptible());
 }
 
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-	int cpu = smp_processor_id();
-
-	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
-		cpumask_clear_cpu(cpu, mm_cpumask(mm));
-}
+void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)

@@ -289,6 +289,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
  * to this information.
  */
 extern u32 *hv_vp_index;
+extern u32 hv_max_vp_index;
 
 /**
  * hv_cpu_number_to_vp_number() - Map CPU to VP.

@@ -82,6 +82,13 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
 #endif
 
+/*
+ * If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point
+ * to init_mm when we switch to a kernel thread (e.g. the idle thread). If
+ * it's false, then we immediately switch CR3 when entering a kernel thread.
+ */
+DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+
 /*
  * 6 because 6 should be plenty and struct tlb_state will fit in
  * two cache lines.
@@ -104,6 +111,23 @@ struct tlb_state {
 	u16 loaded_mm_asid;
 	u16 next_asid;
 
+	/*
+	 * We can be in one of several states:
+	 *
+	 *  - Actively using an mm.  Our CPU's bit will be set in
+	 *    mm_cpumask(loaded_mm) and is_lazy == false;
+	 *
+	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
+	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
+	 *
+	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
+	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
+	 *    We're heuristically guessing that the CR3 load we
+	 *    skipped more than makes up for the overhead added by
+	 *    lazy mode.
+	 */
+	bool is_lazy;
+
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
 	 * disabling interrupts when modifying either one.

@@ -573,11 +573,21 @@ static u32 bdx_deadline_rev(void)
 	return ~0U;
 }
 
+static u32 skx_deadline_rev(void)
+{
+	switch (boot_cpu_data.x86_mask) {
+	case 0x03: return 0x01000136;
+	case 0x04: return 0x02000014;
+	}
+
+	return ~0U;
+}
+
 static const struct x86_cpu_id deadline_match[] = {
 	DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev),
 	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020),
 	DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev),
-	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X, 0x02000014),
+	DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev),
 
 	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22),
 	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20),
@@ -600,7 +610,8 @@ static void apic_check_deadline_errata(void)
 	const struct x86_cpu_id *m;
 	u32 rev;
 
-	if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+	if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
+	    boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return;
 
 	m = x86_match_cpu(deadline_match);

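Editor's note: the hunk above converts the Skylake-X deadline-timer entry from one fixed microcode revision to a per-stepping lookup. A hedged sketch of that lookup in isolation (standalone C, not kernel code; the stepping is passed in rather than read from `boot_cpu_data`):

/*
 * Per-stepping minimum microcode revision, as added above for
 * Skylake-X.  An unknown stepping returns ~0U, i.e. "assume the
 * erratum is present".
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t skx_deadline_rev(unsigned int stepping)
{
	switch (stepping) {
	case 0x03: return 0x01000136;
	case 0x04: return 0x02000014;
	}
	return ~0U;	/* unknown stepping: treat as broken */
}

int main(void)
{
	printf("stepping 3 -> %#x\n", skx_deadline_rev(3));
	printf("stepping 5 -> %#x\n", skx_deadline_rev(5));
	return 0;
}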
@@ -1,3 +1,6 @@
+#ifndef __X86_MCE_INTERNAL_H__
+#define __X86_MCE_INTERNAL_H__
+
 #include <linux/device.h>
 #include <asm/mce.h>
 
@@ -108,3 +111,7 @@ static inline void mce_work_trigger(void) { }
 static inline void mce_register_injector_chain(struct notifier_block *nb) { }
 static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
 #endif
+
+extern struct mca_config mca_cfg;
+
+#endif /* __X86_MCE_INTERNAL_H__ */

@@ -28,6 +28,8 @@
 #include <asm/msr.h>
 #include <asm/trace/irq_vectors.h>
 
+#include "mce-internal.h"
+
 #define NR_BLOCKS         5
 #define THRESHOLD_MAX     0xFFF
 #define INT_TYPE_APIC     0x00020000

@@ -122,9 +122,6 @@ static bool __init check_loader_disabled_bsp(void)
 	bool *res = &dis_ucode_ldr;
 #endif
 
-	if (!have_cpuid_p())
-		return *res;
-
 	/*
 	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
 	 * completely accurate as xen pv guests don't see that CPUID bit set but
@@ -166,24 +163,36 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
 void __init load_ucode_bsp(void)
 {
 	unsigned int cpuid_1_eax;
+	bool intel = true;
 
-	if (check_loader_disabled_bsp())
+	if (!have_cpuid_p())
 		return;
 
 	cpuid_1_eax = native_cpuid_eax(1);
 
 	switch (x86_cpuid_vendor()) {
 	case X86_VENDOR_INTEL:
-		if (x86_family(cpuid_1_eax) >= 6)
-			load_ucode_intel_bsp();
+		if (x86_family(cpuid_1_eax) < 6)
+			return;
 		break;
+
 	case X86_VENDOR_AMD:
-		if (x86_family(cpuid_1_eax) >= 0x10)
-			load_ucode_amd_bsp(cpuid_1_eax);
+		if (x86_family(cpuid_1_eax) < 0x10)
+			return;
+
+		intel = false;
 		break;
+
 	default:
-		break;
+		return;
 	}
+
+	if (check_loader_disabled_bsp())
+		return;
+
+	if (intel)
+		load_ucode_intel_bsp();
+	else
+		load_ucode_amd_bsp(cpuid_1_eax);
 }
 
 static bool check_loader_disabled_ap(void)

@@ -3,6 +3,15 @@
 
 /* Kprobes and Optprobes common header */
 
+#include <asm/asm.h>
+
+#ifdef CONFIG_FRAME_POINTER
+# define SAVE_RBP_STRING "	push %" _ASM_BP "\n" \
+			 "	mov  %" _ASM_SP ", %" _ASM_BP "\n"
+#else
+# define SAVE_RBP_STRING "	push %" _ASM_BP "\n"
+#endif
+
 #ifdef CONFIG_X86_64
 #define SAVE_REGS_STRING			\
 	/* Skip cs, ip, orig_ax. */		\
@@ -17,7 +26,7 @@
 	"	pushq %r10\n"			\
 	"	pushq %r11\n"			\
 	"	pushq %rbx\n"			\
-	"	pushq %rbp\n"			\
+	SAVE_RBP_STRING				\
 	"	pushq %r12\n"			\
 	"	pushq %r13\n"			\
 	"	pushq %r14\n"			\
@@ -48,7 +57,7 @@
 	"	pushl %es\n"			\
 	"	pushl %ds\n"			\
 	"	pushl %eax\n"			\
-	"	pushl %ebp\n"			\
+	SAVE_RBP_STRING				\
 	"	pushl %edi\n"			\
 	"	pushl %esi\n"			\
 	"	pushl %edx\n"			\

@@ -1080,8 +1080,6 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	 * raw stack chunk with redzones:
 	 */
 	__memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
-	regs->flags &= ~X86_EFLAGS_IF;
-	trace_hardirqs_off();
 	regs->ip = (unsigned long)(jp->entry);
 
 	/*

@@ -105,6 +105,10 @@ void __noreturn machine_real_restart(unsigned int type)
 	load_cr3(initial_page_table);
 #else
 	write_cr3(real_mode_header->trampoline_pgd);
+
+	/* Exiting long mode will fail if CR4.PCIDE is set. */
+	if (static_cpu_has(X86_FEATURE_PCID))
+		cr4_clear_bits(X86_CR4_PCIDE);
 #endif
 
 	/* Jump to the identity-mapped low memory code */

@@ -44,7 +44,8 @@ static void unwind_dump(struct unwind_state *state)
 			state->stack_info.type, state->stack_info.next_sp,
 			state->stack_mask, state->graph_idx);
 
-	for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+	for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp;
+	     sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
 		if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
 			break;
 
@@ -174,6 +175,7 @@ static bool is_last_task_frame(struct unwind_state *state)
  * This determines if the frame pointer actually contains an encoded pointer to
  * pt_regs on the stack.  See ENCODE_FRAME_POINTER.
  */
+#ifdef CONFIG_X86_64
 static struct pt_regs *decode_frame_pointer(unsigned long *bp)
 {
 	unsigned long regs = (unsigned long)bp;
@@ -183,6 +185,23 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp)
 
 	return (struct pt_regs *)(regs & ~0x1);
 }
+#else
+static struct pt_regs *decode_frame_pointer(unsigned long *bp)
+{
+	unsigned long regs = (unsigned long)bp;
+
+	if (regs & 0x80000000)
+		return NULL;
+
+	return (struct pt_regs *)(regs | 0x80000000);
+}
+#endif
+
+#ifdef CONFIG_X86_32
+#define KERNEL_REGS_SIZE (sizeof(struct pt_regs) - 2*sizeof(long))
+#else
+#define KERNEL_REGS_SIZE (sizeof(struct pt_regs))
+#endif
 
 static bool update_stack_state(struct unwind_state *state,
 			       unsigned long *next_bp)
@@ -202,7 +221,7 @@ static bool update_stack_state(struct unwind_state *state,
 	regs = decode_frame_pointer(next_bp);
 	if (regs) {
 		frame = (unsigned long *)regs;
-		len = regs_size(regs);
+		len = KERNEL_REGS_SIZE;
 		state->got_irq = true;
 	} else {
 		frame = next_bp;
@@ -226,6 +245,14 @@ static bool update_stack_state(struct unwind_state *state,
 	    frame < prev_frame_end)
 		return false;
 
+	/*
+	 * On 32-bit with user mode regs, make sure the last two regs are safe
+	 * to access:
+	 */
+	if (IS_ENABLED(CONFIG_X86_32) && regs && user_mode(regs) &&
+	    !on_stack(info, frame, len + 2*sizeof(long)))
+		return false;
+
 	/* Move state to the next frame: */
 	if (regs) {
 		state->regs = regs;
@@ -328,6 +355,13 @@ bad_address:
 	    state->regs->sp < (unsigned long)task_pt_regs(state->task))
 		goto the_end;
 
+	/*
+	 * There are some known frame pointer issues on 32-bit.  Disable
+	 * unwinder warnings on 32-bit until it gets objtool support.
+	 */
+	if (IS_ENABLED(CONFIG_X86_32))
+		goto the_end;
+
 	if (state->regs) {
 		printk_deferred_once(KERN_WARNING
 			"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",

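Editor's note: taken together with the earlier `andl $0x7fffffff, %ebp` hunk, the 32-bit decoder above relies on kernel addresses having bit 31 set (true under the typical 3G/1G split): clearing that bit marks an encoded `pt_regs` pointer, and setting it back recovers the address. A standalone round-trip sketch of just that encoding (illustrative only, not the unwinder itself):

/*
 * 32-bit frame-pointer encoding: entry code clears bit 31 to tag a
 * pt_regs frame; the unwinder restores it.  A pointer with bit 31
 * still set is an ordinary frame pointer.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t encode_frame_pointer(uint32_t regs_addr)
{
	return regs_addr & 0x7fffffff;	/* andl $0x7fffffff, %ebp */
}

static uint32_t decode_frame_pointer(uint32_t bp)
{
	if (bp & 0x80000000)
		return 0;		/* a normal frame pointer */
	return bp | 0x80000000;		/* recover the pt_regs address */
}

int main(void)
{
	uint32_t regs = 0xc1234560;	/* hypothetical kernel address */

	assert(decode_frame_pointer(encode_frame_pointer(regs)) == regs);
	assert(decode_frame_pointer(0xc0ffee00) == 0);
	return 0;
}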
@@ -3973,13 +3973,6 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 static inline bool is_last_gpte(struct kvm_mmu *mmu,
 				unsigned level, unsigned gpte)
 {
-	/*
-	 * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
-	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
-	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
-	 */
-	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
-
 	/*
 	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
 	 * If it is clear, there are no large pages at this level, so clear
@@ -3987,6 +3980,13 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
 	 */
 	gpte &= level - mmu->last_nonleaf_level;
 
+	/*
+	 * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
+	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
+	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+	 */
+	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
+
 	return gpte & PT_PAGE_SIZE_MASK;
 }
@@ -4555,6 +4555,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 
 	update_permission_bitmask(vcpu, context, true);
 	update_pkru_bitmask(vcpu, context, true);
+	update_last_nonleaf_level(vcpu, context);
 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }

@@ -334,10 +334,11 @@ retry_walk:
 		--walker->level;
 
 		index = PT_INDEX(addr, walker->level);
-
 		table_gfn = gpte_to_gfn(pte);
 		offset    = index * sizeof(pt_element_t);
 		pte_gpa   = gfn_to_gpa(table_gfn) + offset;
+
+		BUG_ON(walker->level < 1);
 		walker->table_gfn[walker->level - 1] = table_gfn;
 		walker->pte_gpa[walker->level - 1] = pte_gpa;

@@ -11297,7 +11297,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 
 	/* Same as above - no reason to call set_cr4_guest_host_mask(). */
 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
-	kvm_set_cr4(vcpu, vmcs12->host_cr4);
+	vmx_set_cr4(vcpu, vmcs12->host_cr4);
 
 	nested_ept_uninit_mmu_context(vcpu);

@@ -1,5 +1,12 @@
-# Kernel does not boot with instrumentation of tlb.c.
+# Kernel does not boot with instrumentation of tlb.c and mem_encrypt.c
 KCOV_INSTRUMENT_tlb.o		:= n
+KCOV_INSTRUMENT_mem_encrypt.o	:= n
+
+KASAN_SANITIZE_mem_encrypt.o	:= n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_mem_encrypt.o	= -pg
+endif
 
 obj-y	:=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
 	    pat.o pgtable.o physaddr.o setup_nx.o tlb.o

@@ -30,6 +30,8 @@
 
 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
 
+DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+
 static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 			    u16 *new_asid, bool *need_flush)
 {
@@ -80,7 +82,7 @@ void leave_mm(int cpu)
 		return;
 
 	/* Warn if we're not lazy. */
-	WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm)));
+	WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
 
 	switch_mm(NULL, &init_mm, NULL);
 }
@@ -142,45 +144,24 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		__flush_tlb_all();
 	}
 #endif
+	this_cpu_write(cpu_tlbstate.is_lazy, false);
 
 	if (real_prev == next) {
 		VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
 			  next->context.ctx_id);
 
-		if (cpumask_test_cpu(cpu, mm_cpumask(next))) {
-			/*
-			 * There's nothing to do: we weren't lazy, and we
-			 * aren't changing our mm.  We don't need to flush
-			 * anything, nor do we need to update CR3, CR4, or
-			 * LDTR.
-			 */
-			return;
-		}
-
-		/* Resume remote flushes and then read tlb_gen. */
-		cpumask_set_cpu(cpu, mm_cpumask(next));
-		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
-
-		if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) <
-		    next_tlb_gen) {
-			/*
-			 * Ideally, we'd have a flush_tlb() variant that
-			 * takes the known CR3 value as input.  This would
-			 * be faster on Xen PV and on hypothetical CPUs
-			 * on which INVPCID is fast.
-			 */
-			this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
-				       next_tlb_gen);
-			write_cr3(build_cr3(next, prev_asid));
-			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
-					TLB_FLUSH_ALL);
-		}
-
 		/*
-		 * We just exited lazy mode, which means that CR4 and/or LDTR
-		 * may be stale.  (Changes to the required CR4 and LDTR states
-		 * are not reflected in tlb_gen.)
+		 * We don't currently support having a real mm loaded without
+		 * our cpu set in mm_cpumask().  We have all the bookkeeping
+		 * in place to figure out whether we would need to flush
+		 * if our cpu were cleared in mm_cpumask(), but we don't
+		 * currently use it.
 		 */
+		if (WARN_ON_ONCE(real_prev != &init_mm &&
+				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
+			cpumask_set_cpu(cpu, mm_cpumask(next));
+
+		return;
 	} else {
 		u16 new_asid;
 		bool need_flush;
@@ -199,10 +180,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		}
 
 		/* Stop remote flushes for the previous mm */
-		if (cpumask_test_cpu(cpu, mm_cpumask(real_prev)))
-			cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
-
-		VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
+		VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
+				real_prev != &init_mm);
+		cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
 
 		/*
 		 * Start remote flushes and then read tlb_gen.
@@ -232,6 +212,37 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	switch_ldt(real_prev, next);
 }
 
+/*
+ * enter_lazy_tlb() is a hint from the scheduler that we are entering a
+ * kernel thread or other context without an mm.  Acceptable implementations
+ * include doing nothing whatsoever, switching to init_mm, or various clever
+ * lazy tricks to try to minimize TLB flushes.
+ *
+ * The scheduler reserves the right to call enter_lazy_tlb() several times
+ * in a row.  It will notify us that we're going back to a real mm by
+ * calling switch_mm_irqs_off().
+ */
+void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
+		return;
+
+	if (static_branch_unlikely(&tlb_use_lazy_mode)) {
+		/*
+		 * There's a significant optimization that may be possible
+		 * here.  We have accurate enough TLB flush tracking that we
+		 * don't need to maintain coherence of TLB per se when we're
+		 * lazy.  We do, however, need to maintain coherence of
+		 * paging-structure caches.  We could, in principle, leave our
+		 * old mm loaded and only switch to init_mm when
+		 * tlb_remove_page() happens.
+		 */
+		this_cpu_write(cpu_tlbstate.is_lazy, true);
+	} else {
+		switch_mm(NULL, &init_mm, NULL);
+	}
+}
+
 /*
  * Call this when reinitializing a CPU.  It fixes the following potential
  * problems:
@@ -303,16 +314,20 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	/* This code cannot presently handle being reentered. */
 	VM_WARN_ON(!irqs_disabled());
 
+	if (unlikely(loaded_mm == &init_mm))
+		return;
+
 	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
 		   loaded_mm->context.ctx_id);
 
-	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) {
+	if (this_cpu_read(cpu_tlbstate.is_lazy)) {
 		/*
-		 * We're in lazy mode -- don't flush.  We can get here on
-		 * remote flushes due to races and on local flushes if a
-		 * kernel thread coincidentally flushes the mm it's lazily
-		 * still using.
+		 * We're in lazy mode.  We need to at least flush our
+		 * paging-structure cache to avoid speculatively reading
+		 * garbage into our TLB.  Since switching to init_mm is barely
+		 * slower than a minimal flush, just switch to init_mm.
 		 */
+		switch_mm_irqs_off(NULL, &init_mm, NULL);
 		return;
 	}
 
@@ -611,3 +626,57 @@ static int __init create_tlb_single_page_flush_ceiling(void)
 	return 0;
 }
 late_initcall(create_tlb_single_page_flush_ceiling);
+
+static ssize_t tlblazy_read_file(struct file *file, char __user *user_buf,
+				 size_t count, loff_t *ppos)
+{
+	char buf[2];
+
+	buf[0] = static_branch_likely(&tlb_use_lazy_mode) ? '1' : '0';
+	buf[1] = '\n';
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t tlblazy_write_file(struct file *file,
+		 const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	bool val;
+
+	if (kstrtobool_from_user(user_buf, count, &val))
+		return -EINVAL;
+
+	if (val)
+		static_branch_enable(&tlb_use_lazy_mode);
+	else
+		static_branch_disable(&tlb_use_lazy_mode);
+
+	return count;
+}
+
+static const struct file_operations fops_tlblazy = {
+	.read = tlblazy_read_file,
+	.write = tlblazy_write_file,
+	.llseek = default_llseek,
+};
+
+static int __init init_tlb_use_lazy_mode(void)
+{
+	if (boot_cpu_has(X86_FEATURE_PCID)) {
+		/*
+		 * Heuristic: with PCID on, switching to and from
+		 * init_mm is reasonably fast, but remote flush IPIs
+		 * as expensive as ever, so turn off lazy TLB mode.
+		 *
+		 * We can't do this in setup_pcid() because static keys
+		 * haven't been initialized yet, and it would blow up
+		 * badly.
+		 */
+		static_branch_disable(&tlb_use_lazy_mode);
+	}
+
+	debugfs_create_file("tlb_use_lazy_mode", S_IRUSR | S_IWUSR,
+			    arch_debugfs_dir, NULL, &fops_tlblazy);
+	return 0;
+}
+late_initcall(init_tlb_use_lazy_mode);

@@ -93,11 +93,11 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
 	int rc;
 
 	rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
-				       "x86/xen/hvm_guest:prepare",
+				       "x86/xen/guest:prepare",
 				       cpu_up_prepare_cb, cpu_dead_cb);
 	if (rc >= 0) {
 		rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-					       "x86/xen/hvm_guest:online",
+					       "x86/xen/guest:online",
 					       xen_cpu_up_online, NULL);
 		if (rc < 0)
 			cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);

26	block/bio.c

@@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 	 */
 	bmd->is_our_pages = map_data ? 0 : 1;
 	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
-	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
-		      iter->nr_segs, iter->count);
+	bmd->iter = *iter;
+	bmd->iter.iov = bmd->iov;
 
 	ret = -ENOMEM;
 	bio = bio_kmalloc(gfp_mask, nr_pages);
@@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	int ret, offset;
 	struct iov_iter i;
 	struct iovec iov;
+	struct bio_vec *bvec;
 
 	iov_for_each(iov, i, *iter) {
 		unsigned long uaddr = (unsigned long) iov.iov_base;
@@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 		ret = get_user_pages_fast(uaddr, local_nr_pages,
 				(iter->type & WRITE) != WRITE,
 				&pages[cur_page]);
-		if (ret < local_nr_pages) {
+		if (unlikely(ret < local_nr_pages)) {
+			for (j = cur_page; j < page_limit; j++) {
+				if (!pages[j])
+					break;
+				put_page(pages[j]);
+			}
 			ret = -EFAULT;
 			goto out_unmap;
 		}
@@ -1383,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 		offset = offset_in_page(uaddr);
 		for (j = cur_page; j < page_limit; j++) {
 			unsigned int bytes = PAGE_SIZE - offset;
+			unsigned short prev_bi_vcnt = bio->bi_vcnt;
 
 			if (len <= 0)
 				break;
@@ -1397,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 					    bytes)
 				break;
 
+			/*
+			 * check if vector was merged with previous
+			 * drop page reference if needed
+			 */
+			if (bio->bi_vcnt == prev_bi_vcnt)
+				put_page(pages[j]);
+
 			len -= bytes;
 			offset = 0;
 		}
@@ -1423,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	return bio;
 
 out_unmap:
-	for (j = 0; j < nr_pages; j++) {
-		if (!pages[j])
-			break;
-		put_page(pages[j]);
+	bio_for_each_segment_all(bvec, bio, j) {
+		put_page(bvec->bv_page);
 	}
 out:
 	kfree(pages);

@@ -57,6 +57,8 @@ struct key *find_asymmetric_key(struct key *keyring,
 	char *req, *p;
 	int len;
 
+	BUG_ON(!id_0 && !id_1);
+
 	if (id_0) {
 		lookup = id_0->data;
 		len = id_0->len;
@@ -105,7 +107,7 @@ struct key *find_asymmetric_key(struct key *keyring,
 	if (id_0 && id_1) {
 		const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
 
-		if (!kids->id[0]) {
+		if (!kids->id[1]) {
 			pr_debug("First ID matches, but second is missing\n");
 			goto reject;
 		}

@@ -88,6 +88,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
 	bool want = false;
 
 	sinfo = msg->signed_infos;
+	if (!sinfo)
+		goto inconsistent;
+
 	if (sinfo->authattrs) {
 		want = true;
 		msg->have_authattrs = true;

@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
 	int err;
 
 	absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-	buffer = kmalloc(absize, GFP_KERNEL);
+	buffer = kmalloc(absize, GFP_ATOMIC);
 	if (!buffer)
 		return -ENOMEM;
 
@@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req)
 
 int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 {
-	struct scatterlist *sg = req->src;
-	unsigned int offset = sg->offset;
 	unsigned int nbytes = req->nbytes;
+	struct scatterlist *sg;
+	unsigned int offset;
 	int err;
 
-	if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
+	if (nbytes &&
+	    (sg = req->src, offset = sg->offset,
+	     nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
 		void *data;
 
 		data = kmap_atomic(sg_page(sg));

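Editor's note: the rewritten condition above defers touching `req->src` until `nbytes` is known to be non-zero, using comma expressions inside `&&`'s short-circuit so a NULL scatterlist is never dereferenced for zero-length requests. A minimal demo of the same guard pattern (plain C with invented types, not the crypto API):

/*
 * The comma expressions after "nbytes &&" only execute when nbytes is
 * non-zero, so sg/offset are only read off src on the fast path.
 */
#include <stdio.h>
#include <stddef.h>

struct sg { unsigned int offset, length; };

static int fast_path(unsigned int nbytes, struct sg *src)
{
	struct sg *sg;
	unsigned int offset;

	if (nbytes &&
	    (sg = src, offset = sg->offset,
	     nbytes < (sg->length < 4096u - offset ?
		       sg->length : 4096u - offset)))
		return 1;	/* single-page fast path */
	return 0;
}

int main(void)
{
	struct sg one = { .offset = 0, .length = 64 };

	printf("%d\n", fast_path(0, NULL));	/* 0: src never touched */
	printf("%d\n", fast_path(16, &one));	/* 1: fits in one page */
	return 0;
}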
@@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
 
 static int skcipher_walk_first(struct skcipher_walk *walk)
 {
-	walk->nbytes = 0;
-
 	if (WARN_ON_ONCE(in_irq()))
 		return -EDEADLK;
 
-	if (unlikely(!walk->total))
-		return 0;
-
 	walk->buffer = NULL;
 	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
 		int err = skcipher_copy_iv(walk);
@@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 
+	walk->total = req->cryptlen;
+	walk->nbytes = 0;
+
+	if (unlikely(!walk->total))
+		return 0;
+
 	scatterwalk_start(&walk->in, req->src);
 	scatterwalk_start(&walk->out, req->dst);
 
-	walk->total = req->cryptlen;
 	walk->iv = req->iv;
 	walk->oiv = req->iv;
 
@@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	int err;
 
+	walk->nbytes = 0;
+
+	if (unlikely(!walk->total))
+		return 0;
+
 	walk->flags &= ~SKCIPHER_WALK_PHYS;
 
 	scatterwalk_start(&walk->in, req->src);

@@ -554,8 +554,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
 		ctx->name[len - 1] = 0;
 
 		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME)
-			return -ENAMETOOLONG;
+			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
+			err = -ENAMETOOLONG;
+			goto err_drop_spawn;
+		}
 	} else
 		goto err_drop_spawn;

@@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
  *	}
  * }
  *
- * Calling this function with index %2 return %-ENOENT and with index %3
- * returns the last entry. If the property does not contain any more values
- * %-ENODATA is returned. The NULL entry must be single integer and
- * preferably contain value %0.
+ * Calling this function with index %2 or index %3 return %-ENOENT. If the
+ * property does not contain any more values %-ENOENT is returned. The NULL
+ * entry must be single integer and preferably contain value %0.
  *
  * Return: %0 on success, negative error code on failure.
  */
@@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 
 	data = acpi_device_data_of_node(fwnode);
 	if (!data)
-		return -EINVAL;
+		return -ENOENT;
 
 	ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
 	if (ret)
-		return ret;
+		return ret == -EINVAL ? -ENOENT : -EINVAL;
 
 	/*
 	 * The simplest case is when the value is a single reference.  Just
@@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 
 		ret = acpi_bus_get_device(obj->reference.handle, &device);
 		if (ret)
-			return ret;
+			return ret == -ENODEV ? -EINVAL : ret;
 
 		args->adev = device;
 		args->nargs = 0;
@@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 	 * The index argument is then used to determine which reference
 	 * the caller wants (along with the arguments).
	 */
-	if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count)
-		return -EPROTO;
+	if (obj->type != ACPI_TYPE_PACKAGE)
+		return -EINVAL;
+	if (index >= obj->package.count)
+		return -ENOENT;
 
 	element = obj->package.elements;
 	end = element + obj->package.count;
@@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 			ret = acpi_bus_get_device(element->reference.handle,
 						  &device);
 			if (ret)
-				return -ENODEV;
+				return -EINVAL;
 
 			nargs = 0;
 			element++;
@@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 				else if (type == ACPI_TYPE_LOCAL_REFERENCE)
 					break;
 				else
-					return -EPROTO;
+					return -EINVAL;
 			}
 
 			if (nargs > MAX_ACPI_REFERENCE_ARGS)
-				return -EPROTO;
+				return -EINVAL;
 
 			if (idx == index) {
 				args->adev = device;
@@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
 				return -ENOENT;
 			element++;
 		} else {
-			return -EPROTO;
+			return -EINVAL;
 		}
 
 		idx++;
 	}
 
-	return -ENODATA;
+	return -ENOENT;
 }
 EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);

@@ -2582,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 	return true;
 }
 
+/**
+ * binder_get_node_refs_for_txn() - Get required refs on node for txn
+ * @node:         struct binder_node for which to get refs
+ * @proc:         returns @node->proc if valid
+ * @error:        if no @proc then returns BR_DEAD_REPLY
+ *
+ * User-space normally keeps the node alive when creating a transaction
+ * since it has a reference to the target. The local strong ref keeps it
+ * alive if the sending process dies before the target process processes
+ * the transaction. If the source process is malicious or has a reference
+ * counting bug, relying on the local strong ref can fail.
+ *
+ * Since user-space can cause the local strong ref to go away, we also take
+ * a tmpref on the node to ensure it survives while we are constructing
+ * the transaction. We also need a tmpref on the proc while we are
+ * constructing the transaction, so we take that here as well.
+ *
+ * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
+ * Also sets @proc if valid. If the @node->proc is NULL indicating that the
+ * target proc has died, @error is set to BR_DEAD_REPLY
+ */
+static struct binder_node *binder_get_node_refs_for_txn(
+		struct binder_node *node,
+		struct binder_proc **procp,
+		uint32_t *error)
+{
+	struct binder_node *target_node = NULL;
+
+	binder_node_inner_lock(node);
+	if (node->proc) {
+		target_node = node;
+		binder_inc_node_nilocked(node, 1, 0, NULL);
+		binder_inc_node_tmpref_ilocked(node);
+		node->proc->tmp_ref++;
+		*procp = node->proc;
+	} else
+		*error = BR_DEAD_REPLY;
+	binder_node_inner_unlock(node);
+
+	return target_node;
+}
+
 static void binder_transaction(struct binder_proc *proc,
 			       struct binder_thread *thread,
 			       struct binder_transaction_data *tr, int reply,
@@ -2685,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc,
 		ref = binder_get_ref_olocked(proc, tr->target.handle,
 					     true);
 		if (ref) {
-			binder_inc_node(ref->node, 1, 0, NULL);
-			target_node = ref->node;
+			target_node = binder_get_node_refs_for_txn(
+					ref->node, &target_proc,
+					&return_error);
+		} else {
+			binder_user_error("%d:%d got transaction to invalid handle\n",
+					  proc->pid, thread->pid);
+			return_error = BR_FAILED_REPLY;
 		}
 		binder_proc_unlock(proc);
-		if (target_node == NULL) {
-			binder_user_error("%d:%d got transaction to invalid handle\n",
-				proc->pid, thread->pid);
-			return_error = BR_FAILED_REPLY;
-			return_error_param = -EINVAL;
-			return_error_line = __LINE__;
-			goto err_invalid_target_handle;
-		}
 	} else {
 		mutex_lock(&context->context_mgr_node_lock);
 		target_node = context->binder_context_mgr_node;
-		if (target_node == NULL) {
+		if (target_node)
+			target_node = binder_get_node_refs_for_txn(
+					target_node, &target_proc,
+					&return_error);
+		else
 			return_error = BR_DEAD_REPLY;
-			mutex_unlock(&context->context_mgr_node_lock);
-			return_error_line = __LINE__;
-			goto err_no_context_mgr_node;
-		}
-		binder_inc_node(target_node, 1, 0, NULL);
 		mutex_unlock(&context->context_mgr_node_lock);
 	}
-	e->to_node = target_node->debug_id;
-	binder_node_lock(target_node);
-	target_proc = target_node->proc;
-	if (target_proc == NULL) {
-		binder_node_unlock(target_node);
-		return_error = BR_DEAD_REPLY;
+	if (!target_node) {
+		/*
+		 * return_error is set above
		 */
+		return_error_param = -EINVAL;
 		return_error_line = __LINE__;
 		goto err_dead_binder;
 	}
-	binder_inner_proc_lock(target_proc);
-	target_proc->tmp_ref++;
-	binder_inner_proc_unlock(target_proc);
-	binder_node_unlock(target_node);
+	e->to_node = target_node->debug_id;
 	if (security_binder_transaction(proc->tsk,
 					target_proc->tsk) < 0) {
 		return_error = BR_FAILED_REPLY;
@@ -3071,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc,
 	if (target_thread)
 		binder_thread_dec_tmpref(target_thread);
 	binder_proc_dec_tmpref(target_proc);
+	if (target_node)
+		binder_dec_node_tmpref(target_node);
 	/*
 	 * write barrier to synchronize with initialization
 	 * of log entry
@@ -3090,6 +3126,8 @@ err_bad_parent:
 err_copy_data_failed:
 	trace_binder_transaction_failed_buffer_release(t->buffer);
 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
+	if (target_node)
+		binder_dec_node_tmpref(target_node);
 	target_node = NULL;
 	t->buffer->transaction = NULL;
 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
@@ -3104,13 +3142,14 @@ err_bad_call_stack:
 err_empty_call_stack:
 err_dead_binder:
 err_invalid_target_handle:
-err_no_context_mgr_node:
 	if (target_thread)
 		binder_thread_dec_tmpref(target_thread);
 	if (target_proc)
 		binder_proc_dec_tmpref(target_proc);
-	if (target_node)
+	if (target_node) {
 		binder_dec_node(target_node, 1, 0);
+		binder_dec_node_tmpref(target_node);
+	}
 
 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",

|
||||||
|
|
||||||
static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
|
static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
|
||||||
{
|
{
|
||||||
|
ssize_t n;
|
||||||
|
cpumask_var_t mask;
|
||||||
struct node *node_dev = to_node(dev);
|
struct node *node_dev = to_node(dev);
|
||||||
const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
|
|
||||||
|
|
||||||
/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
|
/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
|
||||||
BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
|
BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
|
||||||
|
|
||||||
return cpumap_print_to_pagebuf(list, buf, mask);
|
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
|
||||||
|
n = cpumap_print_to_pagebuf(list, buf, mask);
|
||||||
|
free_cpumask_var(mask);
|
||||||
|
|
||||||
|
return n;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline ssize_t node_read_cpumask(struct device *dev,
|
static inline ssize_t node_read_cpumask(struct device *dev,
|
||||||
|
|
|
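The rewrite above stops handing cpumask_of_node()'s raw mask straight to cpumap_print_to_pagebuf() and instead reports only CPUs that are both in the node and online, which needs a temporary mask. The masking step, reduced to plain runnable integers (the 8-CPU layout is invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical system: node 0 owns CPUs 0-3, CPU 2 is offline. */
        uint64_t node_mask   = 0x0f;  /* stand-in for cpumask_of_node(0) */
        uint64_t online_mask = 0xfb;  /* stand-in for cpu_online_mask    */

        /* The fix: report only online CPUs, as cpumask_and() does above. */
        printf("cpumap: %#llx\n",
               (unsigned long long)(node_mask & online_mask));
        return 0; /* prints 0xb -> CPUs 0, 1, 3 */
    }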
@ -21,6 +21,7 @@
 #include <linux/phy.h>

 struct property_set {
+	struct device *dev;
 	struct fwnode_handle fwnode;
 	const struct property_entry *properties;
 };
@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
  * Caller is responsible to call fwnode_handle_put() on the returned
  * args->fwnode pointer.
  *
+ * Returns: %0 on success
+ *	    %-ENOENT when the index is out of bounds, the index has an empty
+ *		     reference or the property was not found
+ *	    %-EINVAL on parse error
  */
 int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
 				       const char *prop, const char *nargs_prop,
@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
 void device_remove_properties(struct device *dev)
 {
 	struct fwnode_handle *fwnode;
+	struct property_set *pset;

 	fwnode = dev_fwnode(dev);
 	if (!fwnode)
@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev)
 	 * the pset. If there is no real firmware node (ACPI/DT) primary
 	 * will hold the pset.
 	 */
-	if (is_pset_node(fwnode)) {
+	pset = to_pset_node(fwnode);
+	if (pset) {
 		set_primary_fwnode(dev, NULL);
-		pset_free_set(to_pset_node(fwnode));
 	} else {
-		fwnode = fwnode->secondary;
-		if (!IS_ERR(fwnode) && is_pset_node(fwnode)) {
+		pset = to_pset_node(fwnode->secondary);
+		if (pset && dev == pset->dev)
 			set_secondary_fwnode(dev, NULL);
-			pset_free_set(to_pset_node(fwnode));
-		}
 	}
+	if (pset && dev == pset->dev)
+		pset_free_set(pset);
 }
 EXPORT_SYMBOL_GPL(device_remove_properties);
@ -938,6 +944,7 @@ int device_add_properties(struct device *dev,

 	p->fwnode.ops = &pset_fwnode_ops;
 	set_secondary_fwnode(dev, &p->fwnode);
+	p->dev = dev;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(device_add_properties);
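The property_set hunks add an owner field (p->dev = dev) so that device_remove_properties() frees a pset only when the device asking for removal is the one that installed it. A toy, runnable model of that ownership check (struct layout invented, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    struct device { int id; };
    struct pset { struct device *dev; };

    /* Ownership rule from the hunks: only the device recorded at
     * add time may free the set. */
    static void remove_properties(struct device *dev, struct pset *pset)
    {
        if (pset && pset->dev == dev) {
            printf("freeing pset owned by device %d\n", dev->id);
            free(pset);
        }
    }

    int main(void)
    {
        struct device a = {1}, b = {2};
        struct pset *p = malloc(sizeof(*p));

        if (!p)
            return 1;
        p->dev = &a;              /* mirrors p->dev = dev in the last hunk */
        remove_properties(&b, p); /* no-op: b never owned p */
        remove_properties(&a, p); /* freed here */
        return 0;
    }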
@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
 	struct nbd_config *config = nbd->config;
 	config->blksize = blocksize;
 	config->bytesize = blocksize * nr_blocks;
-	nbd_size_update(nbd);
 }

 static void nbd_complete_rq(struct request *req)
@ -1094,6 +1093,7 @@ static int nbd_start_device(struct nbd_device *nbd)
 		args->index = i;
 		queue_work(recv_workqueue, &args->work);
 	}
+	nbd_size_update(nbd);
 	return error;
 }

@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
 		return NULL;
 	*dma_handle = dma_map_single(dev, buf, s->size, dir);
 	if (dma_mapping_error(dev, *dma_handle)) {
-		kfree(buf);
+		kmem_cache_free(s, buf);
 		buf = NULL;
 	}
 	return buf;
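The one-liner above restores allocator pairing: the buffer came from kmem_cache_alloc(), so the mapping-error path must return it with kmem_cache_free(), not kfree(). A hedged kernel-style sketch of the corrected shape (names simplified, not standalone-buildable outside a kernel tree):

    #include <linux/slab.h>
    #include <linux/dma-mapping.h>

    static void *alloc_mapped(struct device *dev, struct kmem_cache *s,
                              dma_addr_t *dma_handle, size_t size)
    {
        void *buf = kmem_cache_alloc(s, GFP_KERNEL);

        if (!buf)
            return NULL;
        *dma_handle = dma_map_single(dev, buf, size, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma_handle)) {
            kmem_cache_free(s, buf); /* not kfree(): buf came from a cache */
            buf = NULL;
        }
        return buf;
    }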
@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
 			if (mbus->hw_io_coherency)
 				w->mbus_attr |= ATTR_HW_COHERENCY;
 			w->base = base & DDR_BASE_CS_LOW_MASK;
-			w->size = (size | ~DDR_SIZE_MASK) + 1;
+			w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
 		}
 	}
 	mvebu_mbus_dram_info.num_cs = cs;
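The (u64) cast above matters because size | ~DDR_SIZE_MASK is a 32-bit value; for a full 4 GiB window the + 1 wraps to zero before the assignment widens it. A runnable demonstration (the mask value here is illustrative, not quoted from the driver):

    #include <stdint.h>
    #include <stdio.h>

    #define DDR_SIZE_MASK 0xff000000u  /* hypothetical: top byte holds size */

    int main(void)
    {
        uint32_t size = 0xff000000u;   /* encodes a full 4 GiB window */

        /* Before the fix: 32-bit wrap turns 4 GiB into 0. */
        uint32_t bad  = (size | ~DDR_SIZE_MASK) + 1;
        /* After the fix: widen first, as the (u64) cast does. */
        uint64_t good = (uint64_t)(size | ~DDR_SIZE_MASK) + 1;

        printf("bad=%#x good=%#llx\n", (unsigned)bad,
               (unsigned long long)good);
        return 0; /* bad=0, good=0x100000000 */
    }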
@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx {
 /* The crypto framework makes it hard to avoid this global. */
 static struct device *artpec6_crypto_dev;

-static struct dentry *dbgfs_root;
-
 #ifdef CONFIG_FAULT_INJECTION
 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
@ -2984,6 +2982,8 @@ struct dbgfs_u32 {
 	char *desc;
 };

+static struct dentry *dbgfs_root;
+
 static void artpec6_crypto_init_debugfs(void)
 {
 	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
 {
 	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
 	struct scatterlist sg[1], *tsg;
-	int err = 0, len = 0, reg, ncp;
+	int err = 0, len = 0, reg, ncp = 0;
 	unsigned int i;
-	const u32 *buffer = (const u32 *)rctx->buffer;
+	u32 *buffer = (void *)rctx->buffer;

 	rctx->sg = hdev->req->src;
 	rctx->total = hdev->req->nbytes;
@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
 		reg |= HASH_CR_DMAA;
 		stm32_hash_write(hdev, HASH_CR, reg);

-		for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++)
-			stm32_hash_write(hdev, HASH_DIN, buffer[i]);
-		stm32_hash_set_nblw(hdev, ncp);
+		if (ncp) {
+			memset(buffer + ncp, 0,
+			       DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
+			writesl(hdev->io_base + HASH_DIN, buffer,
+				DIV_ROUND_UP(ncp, sizeof(u32)));
+		}
+		stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
 		reg = stm32_hash_read(hdev, HASH_STR);
 		reg |= HASH_STR_DCAL;
 		stm32_hash_write(hdev, HASH_STR, reg);
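The stm32-hash change zero-pads the trailing partial block to a whole number of 32-bit words before pushing it with writesl(). The word count comes from DIV_ROUND_UP(ncp, sizeof(u32)); a runnable check of that arithmetic:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* ncp = leftover partial bytes; the peripheral consumes whole
         * 32-bit words, so the driver rounds up and pads with zeros. */
        for (unsigned ncp = 1; ncp <= 8; ncp++)
            printf("ncp=%u -> words=%u\n", ncp, DIV_ROUND_UP(ncp, 4u));
        return 0; /* 1..4 -> 1 word, 5..8 -> 2 words */
    }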
@ -383,7 +383,7 @@ err_put_fd:
 	return err;
 }

-static void sync_fill_fence_info(struct dma_fence *fence,
+static int sync_fill_fence_info(struct dma_fence *fence,
 				 struct sync_fence_info *info)
 {
 	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence,
 		test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
 		ktime_to_ns(fence->timestamp) :
 		ktime_set(0, 0);
+
+	return info->status;
 }

 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 	 * sync_fence_info and return the actual number of fences on
 	 * info->num_fences.
 	 */
-	if (!info.num_fences)
+	if (!info.num_fences) {
+		info.status = dma_fence_is_signaled(sync_file->fence);
 		goto no_fences;
+	} else {
+		info.status = 1;
+	}

 	if (info.num_fences < num_fences)
 		return -EINVAL;
@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 	if (!fence_info)
 		return -ENOMEM;

-	for (i = 0; i < num_fences; i++)
-		sync_fill_fence_info(fences[i], &fence_info[i]);
+	for (i = 0; i < num_fences; i++) {
+		int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+		info.status = info.status <= 0 ? info.status : status;
+	}

 	if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
 			 size)) {
@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,

 no_fences:
 	sync_file_get_name(sync_file, info.name, sizeof(info.name));
-	info.status = dma_fence_is_signaled(sync_file->fence);
 	info.num_fences = num_fences;

 	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
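The status plumbing above makes the ioctl report an aggregate: start from 1 (signaled) and let the first fence reporting 0 or a negative error pin the result. The fold, extracted into runnable form:

    #include <stdio.h>

    /* Mirror of the aggregation in the hunk: the overall status stays 1
     * only while every fence reports 1; any 0 or negative value sticks. */
    static int merge(int overall, int fence_status)
    {
        return overall <= 0 ? overall : fence_status;
    }

    int main(void)
    {
        int statuses[] = { 1, 1, 0, 1, -5 };
        int overall = 1;

        for (unsigned i = 0; i < sizeof(statuses) / sizeof(*statuses); i++)
            overall = merge(overall, statuses[i]);
        printf("overall=%d\n", overall); /* 0: first non-signaled wins */
        return 0;
    }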
@ -212,11 +212,12 @@ struct msgdma_device {
 static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
 {
 	struct msgdma_sw_desc *desc;
+	unsigned long flags;

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
 	list_del(&desc->node);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);

 	INIT_LIST_HEAD(&desc->tx_list);

@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct msgdma_device *mdev = to_mdev(tx->chan);
 	struct msgdma_sw_desc *new;
 	dma_cookie_t cookie;
+	unsigned long flags;

 	new = tx_to_desc(tx);
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	cookie = dma_cookie_assign(tx);

 	list_add_tail(&new->node, &mdev->pending_list);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);

 	return cookie;
 }
@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 	struct msgdma_extended_desc *desc;
 	size_t copy;
 	u32 desc_cnt;
+	unsigned long irqflags;

 	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, irqflags);
 	if (desc_cnt > mdev->desc_free_cnt) {
-		spin_unlock_bh(&mdev->lock);
+		spin_unlock_irqrestore(&mdev->lock, irqflags);
 		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
 		return NULL;
 	}
 	mdev->desc_free_cnt -= desc_cnt;
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, irqflags);

 	do {
 		/* Allocate and populate the descriptor */
@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 	u32 desc_cnt = 0, i;
 	struct scatterlist *sg;
 	u32 stride;
+	unsigned long irqflags;

 	for_each_sg(sgl, sg, sg_len, i)
 		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, irqflags);
 	if (desc_cnt > mdev->desc_free_cnt) {
-		spin_unlock_bh(&mdev->lock);
+		spin_unlock_irqrestore(&mdev->lock, irqflags);
 		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
 		return NULL;
 	}
 	mdev->desc_free_cnt -= desc_cnt;
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, irqflags);

 	avail = sg_dma_len(sgl);

@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
 static void msgdma_issue_pending(struct dma_chan *chan)
 {
 	struct msgdma_device *mdev = to_mdev(chan);
+	unsigned long flags;

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	msgdma_start_transfer(mdev);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 }

 /**
@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
 static void msgdma_free_chan_resources(struct dma_chan *dchan)
 {
 	struct msgdma_device *mdev = to_mdev(dchan);
+	unsigned long flags;

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	msgdma_free_descriptors(mdev);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 	kfree(mdev->sw_desq);
 }

@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
 	u32 count;
 	u32 __maybe_unused size;
 	u32 __maybe_unused status;
+	unsigned long flags;

-	spin_lock(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);

 	/* Read number of responses that are available */
 	count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data)
 		 * bits. So we need to just drop these values.
 		 */
 		size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
-		status = ioread32(mdev->resp - MSGDMA_RESP_STATUS);
+		status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);

 		msgdma_complete_descriptor(mdev);
 		msgdma_chan_desc_cleanup(mdev);
 	}

-	spin_unlock(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 }

 /**
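Every hunk in this mSGDMA driver is the same conversion: spin_lock_bh()/spin_lock() on the channel lock becomes spin_lock_irqsave(), so the code is safe regardless of whether it runs in process, softirq or hard-irq context. A hedged kernel-style sketch of the pattern (struct and function are hypothetical, not from this driver; the lock is assumed initialized with spin_lock_init()):

    #include <linux/spinlock.h>

    struct chan { spinlock_t lock; int free_cnt; };

    static int reserve_descriptors(struct chan *c, int want)
    {
        unsigned long flags; /* saved IRQ state, restored on unlock */
        int ok = 0;

        spin_lock_irqsave(&c->lock, flags);
        if (c->free_cnt >= want) {
            c->free_cnt -= want;  /* reservation and check share one hold */
            ok = 1;
        }
        spin_unlock_irqrestore(&c->lock, flags);
        return ok;
    }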
@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 	struct edma_desc *edesc;
 	struct device *dev = chan->device->dev;
 	struct edma_chan *echan = to_edma_chan(chan);
-	unsigned int width, pset_len;
+	unsigned int width, pset_len, array_size;

 	if (unlikely(!echan || !len))
 		return NULL;

+	/* Align the array size (acnt block) with the transfer properties */
+	switch (__ffs((src | dest | len))) {
+	case 0:
+		array_size = SZ_32K - 1;
+		break;
+	case 1:
+		array_size = SZ_32K - 2;
+		break;
+	default:
+		array_size = SZ_32K - 4;
+		break;
+	}
+
 	if (len < SZ_64K) {
 		/*
 		 * Transfer size less than 64K can be handled with one paRAM
@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 		 * When the full_length is multibple of 32767 one slot can be
 		 * used to complete the transfer.
 		 */
-		width = SZ_32K - 1;
+		width = array_size;
 		pset_len = rounddown(len, width);
 		/* One slot is enough for lengths multiple of (SZ_32K -1) */
 		if (unlikely(pset_len == len))
@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 	}
 	dest += pset_len;
 	src += pset_len;
-	pset_len = width = len % (SZ_32K - 1);
+	pset_len = width = len % array_size;

 	ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
 			       width, pset_len, DMA_MEM_TO_MEM);
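The new switch derives the largest usable acnt (array) size from the strictest common alignment of src, dest and len: __ffs() of their OR gives the lowest set bit, and the acnt block must remain a multiple of that alignment. The selection logic in runnable form (__builtin_ctzl stands in for the kernel's __ffs, and the sample addresses are invented):

    #include <stdio.h>

    static unsigned ffs0(unsigned long v) { return (unsigned)__builtin_ctzl(v); }

    int main(void)
    {
        unsigned long src = 0x1000, dst = 0x2002, len = 0x8000;
        unsigned align = ffs0(src | dst | len); /* strictest shared alignment */
        unsigned array_size;

        switch (align) {            /* mirrors the switch added in the hunk */
        case 0:  array_size = 32767; break; /* SZ_32K - 1: byte-aligned   */
        case 1:  array_size = 32766; break; /* SZ_32K - 2: 2-byte aligned */
        default: array_size = 32764; break; /* SZ_32K - 4: 4-byte aligned */
        }
        printf("align bit=%u acnt=%u\n", align, array_size);
        return 0; /* 0x1000|0x2002|0x8000 = 0xb002 -> bit 1 -> 32766 */
    }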
@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
 	mutex_lock(&xbar->mutex);
 	map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
 					    xbar->dma_requests);
-	mutex_unlock(&xbar->mutex);
 	if (map->xbar_out == xbar->dma_requests) {
+		mutex_unlock(&xbar->mutex);
 		dev_err(&pdev->dev, "Run out of free DMA requests\n");
 		kfree(map);
 		return ERR_PTR(-ENOMEM);
 	}
 	set_bit(map->xbar_out, xbar->dma_inuse);
+	mutex_unlock(&xbar->mutex);

 	map->xbar_in = (u16)dma_spec->args[0];

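The crossbar fix widens the critical section: find_first_zero_bit() and set_bit() now happen under a single mutex hold, otherwise two concurrent allocations could pick the same free slot. A hedged kernel-style sketch of the closed race (size and names hypothetical):

    #include <linux/bitmap.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(map_lock);
    static unsigned long inuse[BITS_TO_LONGS(64)];

    static int claim_slot(void)
    {
        int slot;

        mutex_lock(&map_lock);
        slot = find_first_zero_bit(inuse, 64);
        if (slot < 64)
            set_bit(slot, inuse); /* claimed before the lock drops */
        mutex_unlock(&map_lock);
        return slot < 64 ? slot : -ENOMEM;
    }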
@ -453,7 +453,8 @@ config GPIO_TS4800
 config GPIO_THUNDERX
 	tristate "Cavium ThunderX/OCTEON-TX GPIO"
 	depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
-	depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY
+	depends on PCI_MSI
+	select IRQ_DOMAIN_HIERARCHY
 	select IRQ_FASTEOI_HIERARCHY_HANDLERS
 	help
 	  Say yes here to support the on-chip GPIO lines on the ThunderX
@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
 		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		irq_set_handler_locked(d, handle_edge_irq);
+		/*
+		 * Edge IRQs are already cleared/acked in irq_handler and
+		 * not need to be masked, as result handle_edge_irq()
+		 * logic is excessed here and may cause lose of interrupts.
+		 * So just use handle_simple_irq.
+		 */
+		irq_set_handler_locked(d, handle_simple_irq);

 	return 0;

@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
 static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
 {
 	void __iomem *isr_reg = NULL;
-	u32 isr;
+	u32 enabled, isr, level_mask;
 	unsigned int bit;
 	struct gpio_bank *bank = gpiobank;
 	unsigned long wa_lock_flags;
@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
 	pm_runtime_get_sync(bank->chip.parent);

 	while (1) {
-		u32 isr_saved, level_mask = 0;
-		u32 enabled;
-
 		raw_spin_lock_irqsave(&bank->lock, lock_flags);

 		enabled = omap_get_gpio_irqbank_mask(bank);
-		isr_saved = isr = readl_relaxed(isr_reg) & enabled;
+		isr = readl_relaxed(isr_reg) & enabled;

 		if (bank->level_mask)
 			level_mask = bank->level_mask & enabled;
+		else
+			level_mask = 0;

 		/* clear edge sensitive interrupts before handler(s) are
 		called so that we don't miss any interrupt occurred while
 		executing them */
-		omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
-		omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
-		omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
+		if (isr & ~level_mask)
+			omap_clear_gpio_irqbank(bank, isr & ~level_mask);

 		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)

 /*---------------------------------------------------------------------*/

-static void __init omap_gpio_show_rev(struct gpio_bank *bank)
+static void omap_gpio_show_rev(struct gpio_bank *bank)
 {
 	static bool called;
 	u32 rev;
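In the reworked handler above, edge-latched status bits are cleared immediately (their event is already captured in isr) while level bits are left for the per-line handlers. The mask arithmetic on its own, runnable with invented values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t enabled = 0xff, isr = 0x36, bank_level_mask = 0x0c;
        uint32_t level_mask = bank_level_mask & enabled;

        /* Edge bits are acked up front; level bits are handled later,
         * exactly the split the new loop body makes. */
        printf("clear now: %#x, handle later: %#x\n",
               (unsigned)(isr & ~level_mask), (unsigned)(isr & level_mask));
        return 0; /* clear 0x32, keep 0x04 */
    }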
@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,

 	if (pin <= 255) {
 		char ev_name[5];
-		sprintf(ev_name, "_%c%02X",
+		sprintf(ev_name, "_%c%02hhX",
 			agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
 			pin);
 		if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
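The format change is about provable bounds: ev_name is 5 bytes and pin is known to be <= 255, and the hh length modifier tells the compiler the hex field is a single byte, so "_E%02hhX" always fits ("_E2A" plus the terminator). A runnable illustration:

    #include <stdio.h>

    int main(void)
    {
        char ev_name[5];
        unsigned int pin = 0x2a;
        int edge = 1;

        /* "hh" prints one byte at most two hex digits wide, so the 5-byte
         * buffer is provably large enough. */
        sprintf(ev_name, "_%c%02hhX", edge ? 'E' : 'L', (unsigned char)pin);
        puts(ev_name); /* _E2A */
        return 0;
    }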
@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
 	placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
-	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;

 	r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
 	if (unlikely(r))
@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity)
 {
 	struct amd_sched_rq *rq = entity->rq;
-	int r;

 	if (!amd_sched_entity_is_initialized(sched, entity))
 		return;

 	/**
 	 * The client will not queue more IBs during this fini, consume existing
-	 * queued IBs or discard them on SIGKILL
+	 * queued IBs
 	 */
-	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
-		r = -ERESTARTSYS;
-	else
-		r = wait_event_killable(sched->job_scheduled,
-					amd_sched_entity_is_idle(entity));
+	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
 	amd_sched_rq_remove_entity(rq, entity);
-	if (r) {
-		struct amd_sched_job *job;
-
-		/* Park the kernel for a moment to make sure it isn't processing
-		 * our enity.
-		 */
-		kthread_park(sched->thread);
-		kthread_unpark(sched->thread);
-		while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
-			sched->ops->free_job(job);
-
-	}
 	kfifo_free(&entity->job_queue);
 }

@ -2960,6 +2960,7 @@ out:
 		drm_modeset_backoff(&ctx);
 	}

+	drm_atomic_state_put(state);
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);

@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = {
 static int exynos_drm_suspend(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
-	struct exynos_drm_private *private = drm_dev->dev_private;
+	struct exynos_drm_private *private;

 	if (pm_runtime_suspended(dev) || !drm_dev)
 		return 0;

+	private = drm_dev->dev_private;
+
 	drm_kms_helper_poll_disable(drm_dev);
 	exynos_drm_fbdev_suspend(drm_dev);
 	private->suspend_state = drm_atomic_helper_suspend(drm_dev);
@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev)
 static int exynos_drm_resume(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
-	struct exynos_drm_private *private = drm_dev->dev_private;
+	struct exynos_drm_private *private;

 	if (pm_runtime_suspended(dev) || !drm_dev)
 		return 0;

+	private = drm_dev->dev_private;
 	drm_atomic_helper_resume(drm_dev, private->suspend_state);
 	exynos_drm_fbdev_resume(drm_dev);
 	drm_kms_helper_poll_enable(drm_dev);
@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev)

 	kfree(drm->dev_private);
 	drm->dev_private = NULL;
+	dev_set_drvdata(dev, NULL);

 	drm_dev_unref(drm);
 }
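Both PM hunks reorder a classic hazard: drm_dev was tested for NULL only after its dev_private field had already been loaded in the initializer. Moving the dereference below the check is the whole fix; in runnable miniature (struct shape invented):

    #include <stddef.h>
    #include <stdio.h>

    struct toy_dev { void *dev_private; };

    static int suspend(struct toy_dev *drm_dev)
    {
        void *private;

        if (drm_dev == NULL)              /* check first ...            */
            return 0;
        private = drm_dev->dev_private;   /* ... load only afterwards   */
        printf("suspending with state %p\n", private);
        return 0;
    }

    int main(void)
    {
        struct toy_dev dev = { .dev_private = &dev };

        suspend(NULL); /* safe now; the old ordering dereferenced NULL here */
        return suspend(&dev);
    }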
@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)

 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
-	struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
-	int ring_id;
-
 	kfree(vgpu->sched_data);
 	vgpu->sched_data = NULL;
-
-	spin_lock_bh(&scheduler->mmio_context_lock);
-	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
-		if (scheduler->engine_owner[ring_id] == vgpu) {
-			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
-			scheduler->engine_owner[ring_id] = NULL;
-		}
-	}
-	spin_unlock_bh(&scheduler->mmio_context_lock);
 }

 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
+	int ring_id;

 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		scheduler->need_reschedule = true;
 		scheduler->current_vgpu = NULL;
 	}
+
+	spin_lock_bh(&scheduler->mmio_context_lock);
+	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+		if (scheduler->engine_owner[ring_id] == vgpu) {
+			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+			scheduler->engine_owner[ring_id] = NULL;
+		}
+	}
+	spin_unlock_bh(&scheduler->mmio_context_lock);
 }
@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 	if (READ_ONCE(obj->mm.pages))
 		return -ENODEV;

+	if (obj->mm.madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
 	/* Before the pages are instantiated the object is treated as being
 	 * in the CPU domain. The pages will be clflushed as required before
 	 * use, and we can freely write into the pages directly. If userspace
@ -3013,10 +3016,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)

 static void nop_submit_request(struct drm_i915_gem_request *request)
 {
+	unsigned long flags;
+
 	GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
 	dma_fence_set_error(&request->fence, -EIO);
-	i915_gem_request_submit(request);
+
+	spin_lock_irqsave(&request->engine->timeline->lock, flags);
+	__i915_gem_request_submit(request);
 	intel_engine_init_global_seqno(request->engine, request->global_seqno);
+	spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
 }

 static void engine_set_wedged(struct intel_engine_cs *engine)
@ -33,21 +33,20 @@
 #include "intel_drv.h"
 #include "i915_trace.h"

-static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;

-	for_each_engine(engine, dev_priv, id) {
-		struct intel_timeline *tl;
+	if (i915->gt.active_requests)
+		return false;

-		tl = &ggtt->base.timeline.engine[engine->id];
-		if (i915_gem_active_isset(&tl->last_request))
+	for_each_engine(engine, i915, id) {
+		if (engine->last_retired_context != i915->kernel_context)
 			return false;
 	}

 	return true;
 }

 static int ggtt_flush(struct drm_i915_private *i915)
@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
 				    min_size, alignment, cache_level,
 				    start, end, mode);

-	/* Retire before we search the active list. Although we have
+	/*
+	 * Retire before we search the active list. Although we have
 	 * reasonable accuracy in our retirement lists, we may have
 	 * a stray pin (preventing eviction) that can only be resolved by
 	 * retiring.
@ -182,7 +182,8 @@ search_again:
 			BUG_ON(ret);
 		}

-		/* Can we unpin some objects such as idle hw contents,
+		/*
+		 * Can we unpin some objects such as idle hw contents,
 		 * or pending flips? But since only the GGTT has global entries
 		 * such as scanouts, rinbuffers and contexts, we can skip the
 		 * purge when inspecting per-process local address spaces.
@ -190,19 +191,33 @@ search_again:
 		if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
 			return -ENOSPC;

-		if (ggtt_is_idle(dev_priv)) {
-			/* If we still have pending pageflip completions, drop
-			 * back to userspace to give our workqueues time to
-			 * acquire our locks and unpin the old scanouts.
-			 */
-			return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
+		/*
+		 * Not everything in the GGTT is tracked via VMA using
+		 * i915_vma_move_to_active(), otherwise we could evict as required
+		 * with minimal stalling. Instead we are forced to idle the GPU and
+		 * explicitly retire outstanding requests which will then remove
+		 * the pinning for active objects such as contexts and ring,
+		 * enabling us to evict them on the next iteration.
+		 *
+		 * To ensure that all user contexts are evictable, we perform
+		 * a switch to the perma-pinned kernel context. This all also gives
+		 * us a termination condition, when the last retired context is
+		 * the kernel's there is no more we can evict.
+		 */
+		if (!ggtt_is_idle(dev_priv)) {
+			ret = ggtt_flush(dev_priv);
+			if (ret)
+				return ret;
+
+			goto search_again;
 		}

-		ret = ggtt_flush(dev_priv);
-		if (ret)
-			return ret;
-
-		goto search_again;
+		/*
+		 * If we still have pending pageflip completions, drop
+		 * back to userspace to give our workqueues time to
+		 * acquire our locks and unpin the old scanouts.
+		 */
+		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;

 found:
 	/* drm_mm doesn't allow any other other operations while
@ -6998,6 +6998,7 @@ enum {
  */
 #define L3_GENERAL_PRIO_CREDITS(x)	(((x) >> 1) << 19)
 #define L3_HIGH_PRIO_CREDITS(x)		(((x) >> 1) << 14)
+#define L3_PRIO_CREDITS_MASK		((0x1f << 19) | (0x1f << 14))

 #define GEN7_L3CNTLREG1			_MMIO(0xB01C)
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL	0x3C47FF8C
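L3_PRIO_CREDITS_MASK covers exactly the bits the two credit macros can set (five bits at position 19, five at 14), so callers can clear both fields before or-ing in fresh values. A runnable read-modify-write using the same macros:

    #include <stdint.h>
    #include <stdio.h>

    #define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
    #define L3_HIGH_PRIO_CREDITS(x)    (((x) >> 1) << 14)
    #define L3_PRIO_CREDITS_MASK       ((0x1f << 19) | (0x1f << 14))

    int main(void)
    {
        uint32_t reg = 0xffffffffu; /* pretend register readback */

        /* Clear both credit fields, then install new values - the
         * read-modify-write the mask was added to make possible. */
        reg &= ~L3_PRIO_CREDITS_MASK;
        reg |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
        printf("reg=%#x\n", (unsigned)reg);
        return 0;
    }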
Some files were not shown because too many files have changed in this diff.